hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
5f7776b184cd279cad751b395354da54e4419f28
14,297
py
Python
tests/test_py_ev.py
JBielan/py_ev
0a2d48235b8ff2268254c151a179a2ece40cbd37
[ "MIT" ]
5
2020-03-31T17:06:29.000Z
2020-07-01T22:43:42.000Z
tests/test_py_ev.py
JBielan/py_ev
0a2d48235b8ff2268254c151a179a2ece40cbd37
[ "MIT" ]
null
null
null
tests/test_py_ev.py
JBielan/py_ev
0a2d48235b8ff2268254c151a179a2ece40cbd37
[ "MIT" ]
null
null
null
from py_ev.py_ev import Evaluator ev = Evaluator() def test_reset(): ev.reset() assert len(ev.deck) == 52 assert ev.board == [] def test_build_deck(): ev.reset() assert len(ev.build_deck()) == 52 assert ev.new_deck == [(2, 1), (2, 2), (2, 3), (2, 4), (3, 1), (3, 2), (3, 3), (3, 4), (4, 1), (4, 2), (4, 3), (4, 4), (5, 1), (5, 2), (5, 3), (5, 4), (6, 1), (6, 2), (6, 3), (6, 4), (7, 1), (7, 2), (7, 3), (7, 4), (8, 1), (8, 2), (8, 3), (8, 4), (9, 1), (9, 2), (9, 3), (9, 4), (10, 1), (10, 2), (10, 3), (10, 4), (11, 1), (11, 2), (11, 3), (11, 4), (12, 1), (12, 2), (12, 3), (12, 4), (13, 1), (13, 2), (13, 3), (13, 4), (14, 1), (14, 2), (14, 3), (14, 4)] def test_deal(): ev.reset() assert len(ev.deal(5)) == 5 assert len(ev.deck) == 47 def test_set_cards(): ev.reset() ev.board = ev.set_cards((14, 3), (2, 1), (12, 2)) assert ev.board == [(14, 3), (2, 1), (12, 2)] assert len(ev.deck) == 49 def test_analyze_board(): ev.reset() cards = [(6, 2), (6, 3), (3, 3), (2, 1)] board = [(7, 4), (5, 2), (12, 1), (13, 4), (7, 2)] total, pairness, suitness = ev.analyze_board(cards, board) assert pairness == {14: 0, 13: 1, 12: 1, 11: 0, 10: 0, 9: 0, 8: 0, 7: 2, 6: 2, 5: 1, 4: 0, 3: 1, 2: 1} assert suitness == {1: 2, 2: 3, 3: 2, 4: 2} assert total == [(13, 4), (12, 1), (7, 4), (7, 2), (6, 2), (6, 3), (5, 2), (3, 3), (2, 1)] def test_is_str8(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3), (9, 4), (14, 3)], [(2, 2), (3, 3), (8, 1), (7, 2), (4, 3)]) result = ev.is_str8(cards, pairness, suitness) assert result == (5, 11, 'Straight') cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3), (12, 4), (14, 3)], [(2, 2), (3, 3), (13, 1), (7, 2), (4, 3)]) result = ev.is_str8(cards, pairness, suitness) assert result == (5, 14, 'Straight') cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3), (5, 4), (14, 3)], [(2, 2), (3, 3), (13, 1), (7, 2), (4, 3)]) result = ev.is_str8(cards, pairness, suitness) assert result == (5, 5, 'Straight') cards, pairness, 
suitness = ev.analyze_board([(11, 2), (10, 3), (5, 4), (14, 3)], [(2, 2), (3, 3), (13, 1), (7, 2), (10, 3)]) result = ev.is_str8(cards, pairness, suitness) assert result == (False, 0, None) def test_is_flush(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (9, 2)], [(2, 2), (3, 2), (8, 1), (7, 2), (4, 3)]) result = ev.is_flush(cards, pairness, suitness) assert result == (6, 110000+9000+700+30+2, 'Flush') cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 2)], [(2, 2), (9, 2), (14, 2), (7, 2), (4, 2)]) result = ev.is_flush(cards, pairness, suitness) assert result == (6, 140000+11000+1000+90+7, 'Flush') cards, pairness, suitness = ev.analyze_board([(11, 2), (9, 2)], [(2, 2), (10, 2), (14, 2), (7, 3), (4, 3)]) result = ev.is_flush(cards, pairness, suitness) assert result == (6, 140000+11000+1000+90+2, 'Flush') cards, pairness, suitness = ev.analyze_board([(2, 2), (4, 2)], [(5, 2), (7, 2), (3, 2), (7, 4), (14, 2)]) result = ev.is_flush(cards, pairness, suitness) assert result == (6, 140000+7000+500+40+3, 'Flush') cards, pairness, suitness = ev.analyze_board([(2, 2), (3, 2)], [(5, 2), (7, 4), (8, 3), (7, 4), (4, 3)]) result = ev.is_flush(cards, pairness, suitness) assert result == (False, 0, None) def test_is_quad(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3), (9, 2), (14, 3)], [(2, 2), (3, 2), (8, 1), (7, 2), (4, 3)]) result = ev.is_quad(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3), (9, 2), (14, 3)], [(2, 2), (3, 2), (11, 1), (7, 2), (4, 3)]) result = ev.is_quad(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3), (9, 2), (14, 3)], [(2, 2), (3, 2), (11, 1), (7, 2), (11, 3)]) result = ev.is_quad(cards, pairness, suitness) assert result == (8, 110+14, 'Four of a Kind') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3), (14, 2), (14, 3)], 
[(2, 2), (14, 2), (11, 1), (14, 2), (11, 3)]) result = ev.is_quad(cards, pairness, suitness) assert result == (8, 140+11, 'Four of a Kind') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(2, 2), (11, 4), (11, 1)]) result = ev.is_quad(cards, pairness, suitness) assert result == (8, 110+2, 'Four of a Kind') def test_is_fullhouse(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(2, 2), (11, 1), (8, 1), (4, 2), (4, 3)]) result = ev.is_fullhouse(cards, pairness, suitness) assert result == (7, 110+4, 'Full House') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(12, 2), (11, 1), (12, 1), (12, 4), (4, 3)]) result = ev.is_fullhouse(cards, pairness, suitness) assert result == (7, 120+11, 'Full House') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(11, 4), (11, 1), (5, 1), (12, 4), (4, 3)]) result = ev.is_fullhouse(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(11, 4), (10, 1), (5, 1), (12, 4), (4, 3)]) result = ev.is_fullhouse(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(10, 4), (10, 1), (5, 1), (12, 4), (4, 3)]) result = ev.is_fullhouse(cards, pairness, suitness) assert result == (False, 0, None) def test_is_3_of_a_kind(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(2, 2), (11, 1), (8, 1), (3, 2), (4, 3)]) result = ev.is_3_of_a_kind(cards, pairness, suitness) assert result == (4, 1100+80+4, 'Three of a kind') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(2, 2), (10, 1), (8, 1), (3, 2), (4, 3)]) result = ev.is_3_of_a_kind(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (12, 3)], [(2, 2), (2, 1), (2, 3), (3, 2), (4, 3)]) result = ev.is_3_of_a_kind(cards, pairness, suitness) assert result == (4, 
200+120+11, 'Three of a kind') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(2, 2), (2, 1), (5, 3), (3, 2), (4, 3)]) result = ev.is_3_of_a_kind(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(12, 4), (10, 1)], [(10, 4), (10, 2), (4, 1), (3, 2), (2, 3)]) result = ev.is_3_of_a_kind(cards, pairness, suitness) assert result == (4, 1000+120+4, 'Three of a kind') def test_is_2_pairs(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(12, 2), (12, 1), (3, 1), (3, 2), (4, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (3, 1314, 'Two pair') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(12, 2), (12, 1), (3, 1), (3, 2), (14, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (3, 1324, 'Two pair') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(10, 2), (12, 1), (3, 1), (3, 2), (14, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (3, 1144, 'Two pair') cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(10, 2), (12, 1), (2, 1), (3, 2), (14, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(10, 2), (11, 1), (2, 1), (3, 2), (14, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(11, 2), (11, 1), (2, 1), (3, 2), (14, 3)]) result = ev.is_2_pairs(cards, pairness, suitness) assert result == (False, 0, None) def test_is_pair(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (4, 3)]) result = ev.is_pair(cards, pairness, suitness) assert result == (2, 11000+1400+130+4, 'One pair') cards, pairness, suitness = ev.analyze_board([(10, 2), (11, 3)], [(13, 2), (14, 1), (2, 1), (3, 
2), (4, 3)]) result = ev.is_pair(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (14, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (4, 3)]) result = ev.is_pair(cards, pairness, suitness) assert result == (2, 14000+1300+110+4, 'One pair') def test_is_air(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (11, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (4, 3)]) result = ev.is_air(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (4, 3)]) result = ev.is_air(cards, pairness, suitness) assert result == (1, 140000+13000+1100+100+4, 'High card') cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (2, 3)]) result = ev.is_air(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(11, 2), (10, 3)], [(13, 2), (14, 1), (2, 1), (3, 2), (8, 3)]) result = ev.is_air(cards, pairness, suitness) assert result == (1, 140000+13000+1100+100+8, 'High card') def test_is_str8_flush(): ev = Evaluator() cards, pairness, suitness = ev.analyze_board([(11, 2), (12, 2)], [(13, 2), (14, 2), (10, 2), (3, 2), (4, 3)]) result = ev.is_str8_flush(cards, pairness, suitness) assert result == (9, 14, 'Straight Flush') cards, pairness, suitness = ev.analyze_board([(12, 2), (12, 2)], [(13, 2), (14, 2), (10, 2), (3, 2), (4, 3)]) result = ev.is_str8_flush(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(12, 2), (13, 2)], [(13, 2), (8, 2), (10, 2), (3, 2), (4, 3)]) result = ev.is_str8_flush(cards, pairness, suitness) assert result == (False, 0, None) cards, pairness, suitness = ev.analyze_board([(14, 2), (5, 2)], [(13, 2), (8, 2), (2, 2), (3, 2), (4, 2)]) result = ev.is_str8_flush(cards, pairness, suitness) assert result == (9, 5, 'Straight Flush') cards, 
pairness, suitness = ev.analyze_board([(14, 2), (5, 2)], [(13, 2), (8, 2), (2, 2), (3, 3), (4, 2)]) result = ev.is_str8_flush(cards, pairness, suitness) assert result == (False, 0, None) def test_evaluate(): ev = Evaluator() assert ev.evaluate([(3, 2), (3, 3)], [(3, 4), (10, 1), (12, 3), (14, 1), (6, 4)]) > \ ev.evaluate([(10, 2), (12, 4)], [(3, 4), (10, 1), (12, 3), (14, 1), (6, 4)]) assert ev.evaluate([(3, 2), (3, 3)], [(3, 4), (10, 1), (12, 3), (14, 1), (6, 4)]) > \ ev.evaluate([(14, 2), (13, 4)], [(12, 4), (11, 1), (9, 3), (8, 1), (7, 4)]) assert ev.evaluate([(2, 2), (2, 3)], [(2, 4), (3, 1), (3, 3), (14, 1), (6, 4)]) > \ ev.evaluate([(14, 2), (13, 2)], [(12, 2), (11, 2), (9, 3), (8, 2), (7, 4)]) assert ev.evaluate([(2, 2), (2, 3)], [(4, 4), (3, 1), (5, 3), (7, 1), (8, 4)]) > \ ev.evaluate([(14, 2), (13, 2)], [(12, 2), (11, 2), (9, 3), (8, 1), (7, 4)]) def test_equity(): ev = Evaluator() h1, h2, draw = ev.equity(3000000, [(14, 4), (14, 2)], [(8, 3), (9, 3)]) assert 77.12 <= h1 <= 77.32 assert 22.37 <= h2 <= 22.57 # h1, h2, draw = ev.equity(3000000, [(10, 3), (11, 3)], [(8, 3), (9, 3)]) # assert 66.53 <= h1 <= 66.73 # assert 32.01 <= h2 <= 32.21 h1, h2, draw = ev.equity(3000000, [(2, 2), (3, 1)], [(4, 3), (5, 4)]) assert 29.7 <= h1 <= 29.9 assert 49.77 <= h2 <= 49.97
45.531847
106
0.446527
2,028
14,297
3.074458
0.052268
0.218124
0.282919
0.172414
0.852767
0.828869
0.8085
0.796472
0.784924
0.753007
0
0.15957
0.336364
14,297
314
107
45.531847
0.497576
0.008883
0
0.49789
0
0
0.016729
0
0
0
0
0
0.257384
1
0.067511
false
0
0.004219
0
0.07173
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
5f9f4594ecd3477b65068c7ab52c613673540387
164
py
Python
server/apps/stream/tests/__init__.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
server/apps/stream/tests/__init__.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
server/apps/stream/tests/__init__.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
from .test_helper import * from .test_stream_id import * from .test_stream_variable import * from .test_system_variable import * from .test_virtual_stream import *
27.333333
35
0.817073
24
164
5.208333
0.375
0.32
0.448
0.32
0
0
0
0
0
0
0
0
0.121951
164
5
36
32.8
0.868056
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
5fad5878a3175ce61ecd211a99683fd9e0030602
36,680
py
Python
app/tests/unit/test_conversion_parser.py
willsower/latex2speech
36a69bb5ee74e1ca362968604b4a554034c5f408
[ "MIT" ]
3
2021-03-17T22:13:23.000Z
2021-08-30T20:35:39.000Z
app/tests/unit/test_conversion_parser.py
willsower/latex2speech
36a69bb5ee74e1ca362968604b4a554034c5f408
[ "MIT" ]
50
2021-03-15T23:03:43.000Z
2021-07-14T14:22:45.000Z
app/tests/unit/test_conversion_parser.py
willsower/latex2speech
36a69bb5ee74e1ca362968604b4a554034c5f408
[ "MIT" ]
3
2021-03-30T18:18:40.000Z
2021-04-14T17:51:26.000Z
import unittest from unittest.mock import patch, Mock import xml.etree.ElementTree as ET import TexSoup from SSMLParsing.text_element import TextElement from SSMLParsing.root_element import RootElement from SSMLParsing.break_element import BreakElement from SSMLParsing.arg_element import ArgElement from SSMLParsing.content_element import ContentElement from SSMLParsing.emphasis_element import EmphasisElement from SSMLParsing.prosody_element import ProsodyElement import conversion_db from conversion_parser import ConversionParser class testConversionParser(unittest.TestCase): ''' Tests basic text replacement in commands and environments. ''' @patch('conversion_db.ConversionDB') def testTextElement(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': return [TextElement('text 1')] else: return None def mockEnvConversion(env): if env == 'b': return [TextElement('text 2'), ContentElement()] else: return None def mockEnvDefinition(env): if env == 'b': return {'a': [TextElement('text 3')], 'type': None} else: return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a\begin{b}\a\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) print("TESTING SSSML PARSE " + str(ssmlParseTree)) # Check resulting tree structure self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 0) self.assertEqual(ssmlParseTree.getHeadText().strip().replace(" ", " "), 'text 1 text 2 text 3') ''' Tests the BreakElement with various attributes in both commands and environments. 
''' @patch('conversion_db.ConversionDB') def testBreakElement(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': return [BreakElement(time='3ms')] else: return None def mockEnvConversion(env): if env == 'b': return [BreakElement(strength='strong'), ContentElement(), BreakElement(strength='weak')] else: return None def mockEnvDefinition(env): if env == 'b': return {'a': [BreakElement(time='5ms', strength='x-weak')], 'mathmode': False} else: return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a\begin{b}\a\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) # Check resulting tree structure self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 4) self.assertIsInstance(ssmlParseTree.children[0], BreakElement) self.assertEqual(ssmlParseTree.children[0].getTime(), '3ms') self.assertEqual(ssmlParseTree.children[0].getStrength(), None) self.assertIsInstance(ssmlParseTree.children[1], BreakElement) self.assertEqual(ssmlParseTree.children[1].getTime(), None) self.assertEqual(ssmlParseTree.children[1].getStrength(), 'strong') self.assertIsInstance(ssmlParseTree.children[2], BreakElement) self.assertEqual(ssmlParseTree.children[2].getTime(), '5ms') self.assertEqual(ssmlParseTree.children[2].getStrength(), 'x-weak') self.assertIsInstance(ssmlParseTree.children[3], BreakElement) self.assertEqual(ssmlParseTree.children[3].getTime(), None) self.assertEqual(ssmlParseTree.children[3].getStrength(), 'weak') ''' Tests the EmphasisElement with various attributes in both commands and environments. 
One important test is here is ensuring the ContentElement and ArgElement work properly while being children of an EmphasisElement. ''' @patch('conversion_db.ConversionDB') def testEmphasisElement(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [EmphasisElement(level='strong'), ArgElement(1)] a[0].insertChild(0, EmphasisElement(level='reduced')) a[0].children[0].insertChild(0, ArgElement(2)) return a else: return None def mockEnvConversion(env): if env == 'b': b = [ContentElement(), EmphasisElement(level='moderate'), ArgElement(2), EmphasisElement(level='none')] b[1].insertChild(0, ContentElement()) b[1].insertChild(0, ArgElement(1)) b[3].insertChild(0, EmphasisElement(level='strong')) return b else: return None def mockEnvDefinition(env): return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a{1}{2}\begin{b}{3}{4}\a{5}{6}\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 4) self.assertIsInstance(ssmlParseTree.children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[0].getTailText().strip(), '1') self.assertEqual(ssmlParseTree.children[0].getLevel(), 'strong') self.assertEqual(len(ssmlParseTree.children[0].children), 1) self.assertIsInstance(ssmlParseTree.children[0].children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[0].children[0].getHeadText().strip(), '2') self.assertEqual(ssmlParseTree.children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[0].children[0].getLevel(), 'reduced') 
self.assertIsInstance(ssmlParseTree.children[1], EmphasisElement) self.assertEqual(ssmlParseTree.children[1].getHeadText(), '') self.assertEqual(ssmlParseTree.children[1].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[1].getLevel(), 'strong') self.assertEqual(len(ssmlParseTree.children[1].children), 1) self.assertIsInstance(ssmlParseTree.children[1].children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[1].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[1].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[1].children[0].getLevel(), 'reduced') self.assertIsInstance(ssmlParseTree.children[2], EmphasisElement) self.assertEqual(ssmlParseTree.children[2].getHeadText().strip(), '3') self.assertEqual(ssmlParseTree.children[2].getTailText().strip(), '4') self.assertEqual(ssmlParseTree.children[2].getLevel(), 'moderate') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[2].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[2].children[0].getLevel(), 'strong') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0].children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getLevel(), 'reduced') self.assertIsInstance(ssmlParseTree.children[3], EmphasisElement) self.assertEqual(ssmlParseTree.children[3].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].getLevel(), 'none') 
self.assertEqual(len(ssmlParseTree.children[3].children), 1) self.assertIsInstance(ssmlParseTree.children[3].children[0], EmphasisElement) self.assertEqual(ssmlParseTree.children[3].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getLevel(), 'strong') ''' Tests that arguments are properly expanded when the ArgElement object is used in cmd/env definitions. ''' @patch('conversion_db.ConversionDB') def testArgElement(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [ArgElement(2), ArgElement('1', argType='bracket')] return a elif cmd == 'd': d = [BreakElement()] return d else: return None def mockEnvConversion(env): if env == 'b': b = [ArgElement(1, 'bracket'), ArgElement(4, argType='brace'), ContentElement()] return b else: return None def mockEnvDefinition(env): if env == 'b': return {'a': [ArgElement(1), ArgElement('2', argType='bracket')], \ 'c': [ArgElement(3)], 'mathmode': False} else: return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a{1}{\a{2}[3]{\d}}[\d]\begin{b}{4}{5}[6]{7}{\a[8]{9}{10}[\d]}\d\a[11]{12}[13]\d\c{14}{15}{\d 16}\d\end{b}') # Should be <speak> <break/> 3 <break/> 6 9 <break/> <break/> 12 13 <break/> <break/> 16 <break/> <speak/> # ^ Cmd ^ Env say ^ Env contents parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(ssmlParseTree.getHeadText(), '') self.assertEqual(len(ssmlParseTree.children), 7) self.assertIsInstance(ssmlParseTree.children[0], BreakElement) self.assertEqual(ssmlParseTree.children[0].getTailText().strip(), '3') 
self.assertIsInstance(ssmlParseTree.children[1], BreakElement) self.assertEqual(ssmlParseTree.children[1].getTailText().strip().replace(" ", " "), '6 9') self.assertIsInstance(ssmlParseTree.children[2], BreakElement) self.assertEqual(ssmlParseTree.children[2].getTailText(), '') self.assertIsInstance(ssmlParseTree.children[3], BreakElement) self.assertEqual(ssmlParseTree.children[3].getTailText().strip().replace(" ", " "), '12 13') self.assertIsInstance(ssmlParseTree.children[4], BreakElement) self.assertEqual(ssmlParseTree.children[4].getTailText(), '') self.assertIsInstance(ssmlParseTree.children[5], BreakElement) self.assertEqual(ssmlParseTree.children[5].getTailText(), ' 16') self.assertIsInstance(ssmlParseTree.children[6], BreakElement) self.assertEqual(ssmlParseTree.children[6].getTailText(), '') ''' Testing environments and ensuring undefined environments still have their contents read out, while defined environments without the content tag are not. ''' @patch('conversion_db.ConversionDB') def testEnvironments(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [TextElement('text1')] return a else: return None def mockEnvConversion(env): if env == 'a': a = [TextElement('text2')] return a if env == 'b': b = [ContentElement()] return b else: return None def mockEnvDefinition(env): if env == 'b': return {'a': [TextElement('text3')], 'mathmode': False} else: return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\begin{a}\a\end{a}\begin{c}\begin{a}\a\end{a}\end{c}\begin{c}\begin{b}\a\end{b}\end{c}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) 
self.assertEqual(len(ssmlParseTree.children), 0) self.assertEqual(ssmlParseTree.getHeadText().strip().replace(" ", " "), 'text2 text2 text3') ''' Prosody <prosody attribute = "value"></prosody> <prosody volume = ""></prosody> - default (regular) - silent, x-soft, soft, medium, loud, x-loud. Sets volume - +ndB, -ndB : Changes volume relative to the current level. A value of +0dB means no change, +6dB means approximately twice the current volume and -6dB means approsimately half the current volume ''' @patch('conversion_db.ConversionDB') def testProsodyElementVolume(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [ProsodyElement(volume='x-loud'), ArgElement(1)] a[0].insertChild(0, ProsodyElement(volume='medium')) a[0].children[0].insertChild(0, ArgElement(2)) return a else: return None def mockEnvConversion(env): if env == 'b': b = [ContentElement(), ProsodyElement(volume='-3dB'), ArgElement(2), ProsodyElement(volume='none')] b[1].insertChild(0, ContentElement()) b[1].insertChild(0, ArgElement(1)) b[3].insertChild(0, ProsodyElement(volume='loud')) return b else: return None def mockEnvDefinition(env): return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a{1}{2}\begin{b}{3}{4}\a{5}{6}\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 4) self.assertIsInstance(ssmlParseTree.children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[0].getTailText().strip(), '1') self.assertEqual(ssmlParseTree.children[0].getVolume(), 'x-loud') 
self.assertEqual(len(ssmlParseTree.children[0].children), 1) self.assertIsInstance(ssmlParseTree.children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].children[0].getHeadText().strip(), '2') self.assertEqual(ssmlParseTree.children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[0].children[0].getVolume(), 'medium') self.assertIsInstance(ssmlParseTree.children[1], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].getHeadText(), '') self.assertEqual(ssmlParseTree.children[1].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[1].getVolume(), 'x-loud') self.assertEqual(len(ssmlParseTree.children[1].children), 1) self.assertIsInstance(ssmlParseTree.children[1].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[1].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[1].children[0].getVolume(), 'medium') self.assertIsInstance(ssmlParseTree.children[2], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].getHeadText().strip(), '3') self.assertEqual(ssmlParseTree.children[2].getTailText().strip(), '4') self.assertEqual(ssmlParseTree.children[2].getVolume(), '-3dB') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[2].children[0].getVolume(), 'x-loud') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getTailText(), '') 
self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getVolume(), 'medium') self.assertIsInstance(ssmlParseTree.children[3], ProsodyElement) self.assertEqual(ssmlParseTree.children[3].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].getVolume(), 'medium') self.assertEqual(len(ssmlParseTree.children[3].children), 1) self.assertIsInstance(ssmlParseTree.children[3].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[3].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getVolume(), 'loud') ''' <prosody rate = ""></prosody> - x-slow, slow, medium, fast, x-fast. Sets pitch - n% a non negative percentage change in the speaking rate For example, a value of 100% means no change in speaking rate, a value of 200% means twice the default rate, value of 50% means a speaking rate of half the default rate. 
This value has a range of 20-200%''' @patch('conversion_db.ConversionDB') def testProsodyElementRate(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [ProsodyElement(rate='slow'), ArgElement(1)] a[0].insertChild(0, ProsodyElement(rate='x-fast')) a[0].children[0].insertChild(0, ArgElement(2)) return a else: return None def mockEnvConversion(env): if env == 'b': b = [ContentElement(), ProsodyElement(rate='40%'), ArgElement(2), ProsodyElement(rate='none')] b[1].insertChild(0, ContentElement()) b[1].insertChild(0, ArgElement(1)) b[3].insertChild(0, ProsodyElement(rate='180%')) return b else: return None def mockEnvDefinition(env): return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a{1}{2}\begin{b}{3}{4}\a{5}{6}\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 4) self.assertIsInstance(ssmlParseTree.children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[0].getTailText().strip(), '1') self.assertEqual(ssmlParseTree.children[0].getRate(), 'slow') self.assertEqual(len(ssmlParseTree.children[0].children), 1) self.assertIsInstance(ssmlParseTree.children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].children[0].getHeadText().strip(), '2') self.assertEqual(ssmlParseTree.children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[0].children[0].getRate(), 'x-fast') self.assertIsInstance(ssmlParseTree.children[1], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].getHeadText(), '') 
self.assertEqual(ssmlParseTree.children[1].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[1].getRate(), 'slow') self.assertEqual(len(ssmlParseTree.children[1].children), 1) self.assertIsInstance(ssmlParseTree.children[1].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[1].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[1].children[0].getRate(), 'x-fast') self.assertIsInstance(ssmlParseTree.children[2], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].getHeadText().strip(), '3') self.assertEqual(ssmlParseTree.children[2].getTailText().strip(), '4') self.assertEqual(ssmlParseTree.children[2].getRate(), '40%') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[2].children[0].getRate(), 'slow') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getRate(), 'x-fast') self.assertIsInstance(ssmlParseTree.children[3], ProsodyElement) self.assertEqual(ssmlParseTree.children[3].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].getRate(), 'medium') self.assertEqual(len(ssmlParseTree.children[3].children), 1) self.assertIsInstance(ssmlParseTree.children[3].children[0], ProsodyElement) 
self.assertEqual(ssmlParseTree.children[3].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getRate(), '180%') '''<prosody pitch = ""></prosody> - deafult (regular) - x-low, low, medium, high, x-hgih. Sets pitch - +n% or -n% adjusts pitch by a relative percentage. For example, a value of +0% means no baseline pitch change, +5% gives a little higher baseline pitch, and -5% results in a lower baseline pitch''' @patch('conversion_db.ConversionDB') def testProsodyElementPitch(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() def mockCmdConversion(cmd): if cmd == 'a': a = [ProsodyElement(pitch='x-low'), ArgElement(1)] a[0].insertChild(0, ProsodyElement(pitch='high')) a[0].children[0].insertChild(0, ArgElement(2)) return a else: return None def mockEnvConversion(env): if env == 'b': b = [ContentElement(), ProsodyElement(pitch='-40%'), ArgElement(2), ProsodyElement(pitch='none')] b[1].insertChild(0, ContentElement()) b[1].insertChild(0, ArgElement(1)) b[3].insertChild(0, ProsodyElement(pitch='90%')) return b else: return None def mockEnvDefinition(env): return None db.getCmdConversion = Mock(side_effect=mockCmdConversion) db.getEnvConversion = Mock(side_effect=mockEnvConversion) db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # Set up TexSoup parse tree to be parsed doc = TexSoup.TexSoup(r'\a{1}{2}\begin{b}{3}{4}\a{5}{6}\end{b}') # Parse on the given db and tree parser = ConversionParser(db) ssmlParseTree = parser.parse(doc, test=True) self.assertIsInstance(ssmlParseTree, RootElement) self.assertEqual(len(ssmlParseTree.children), 4) self.assertIsInstance(ssmlParseTree.children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[0].getTailText().strip(), '1') self.assertEqual(ssmlParseTree.children[0].getPitch(), 'x-low') 
self.assertEqual(len(ssmlParseTree.children[0].children), 1) self.assertIsInstance(ssmlParseTree.children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[0].children[0].getHeadText().strip(), '2') self.assertEqual(ssmlParseTree.children[0].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[0].children[0].getPitch(), 'high') self.assertIsInstance(ssmlParseTree.children[1], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].getHeadText(), '') self.assertEqual(ssmlParseTree.children[1].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[1].getPitch(), 'x-low') self.assertEqual(len(ssmlParseTree.children[1].children), 1) self.assertIsInstance(ssmlParseTree.children[1].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[1].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[1].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[1].children[0].getPitch(), 'high') self.assertIsInstance(ssmlParseTree.children[2], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].getHeadText().strip(), '3') self.assertEqual(ssmlParseTree.children[2].getTailText().strip(), '4') self.assertEqual(ssmlParseTree.children[2].getPitch(), '-40%') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[2].children[0].getTailText().strip(), '5') self.assertEqual(ssmlParseTree.children[2].children[0].getPitch(), 'x-low') self.assertEqual(len(ssmlParseTree.children[2].children), 1) self.assertIsInstance(ssmlParseTree.children[2].children[0].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getHeadText().strip(), '6') self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getTailText(), '') 
self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getPitch(), 'high') self.assertIsInstance(ssmlParseTree.children[3], ProsodyElement) self.assertEqual(ssmlParseTree.children[3].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].getPitch(), 'medium') self.assertEqual(len(ssmlParseTree.children[3].children), 1) self.assertIsInstance(ssmlParseTree.children[3].children[0], ProsodyElement) self.assertEqual(ssmlParseTree.children[3].children[0].getHeadText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getTailText(), '') self.assertEqual(ssmlParseTree.children[3].children[0].getPitch(), '+90%') '''<prosody amazon:max-duration = "2s"></prosody> - "n"s maximum duration in seconds - "n"ms maximum duration in milliseconds''' @patch('conversion_db.ConversionDB') def testProsodyElementMaxDura(self, MockConversionDB): # Set up mock database db = conversion_db.ConversionDB() # def mockCmdConversion(cmd): # if cmd == 'a': # a = [ProsodyElement(duration='2000s'), ArgElement(1)] # a[0].insertChild(0, ProsodyElement(duration='1000s')) # a[0].children[0].insertChild(0, ArgElement(2)) # return a # else: # return None # def mockEnvConversion(env): # if env == 'b': # b = [ContentElement(), ProsodyElement(duration='3000ms'), ArgElement(2), ProsodyElement(duration='5000ms')] # b[1].insertChild(0, ContentElement()) # b[1].insertChild(0, ArgElement(1)) # b[3].insertChild(0, ProsodyElement(duration='9000s')) # return b # else: # return None # def mockEnvDefinition(env): # return None # db.getCmdConversion = Mock(side_effect=mockCmdConversion) # db.getEnvConversion = Mock(side_effect=mockEnvConversion) # db.getEnvDefinition = Mock(side_effect=mockEnvDefinition) # # Set up TexSoup parse tree to be parsed # doc = TexSoup.TexSoup(r'\a{1}{2}\begin{b}{3}{4}\a{5}{6}\end{b}') # # Parse on the given db and tree # parser = ConversionParser(db) # ssmlParseTree = parser.parse(doc, test=True) # 
self.assertIsInstance(ssmlParseTree, RootElement) # self.assertEqual(len(ssmlParseTree.children), 4) # self.assertIsInstance(ssmlParseTree.children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[0].getHeadText(), '') # self.assertEqual(ssmlParseTree.children[0].getTailText(), '1') # self.assertEqual(ssmlParseTree.children[0].getDuration(), '2000000ms') # self.assertEqual(len(ssmlParseTree.children[0].children), 1) # self.assertIsInstance(ssmlParseTree.children[0].children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[0].children[0].getHeadText(), '2') # self.assertEqual(ssmlParseTree.children[0].children[0].getTailText(), '') # self.assertEqual(ssmlParseTree.children[0].children[0].getDuration(), '1000000ms') # self.assertIsInstance(ssmlParseTree.children[1], ProsodyElement) # self.assertEqual(ssmlParseTree.children[1].getHeadText(), '') # self.assertEqual(ssmlParseTree.children[1].getTailText(), '5') # self.assertEqual(ssmlParseTree.children[1].getDuration(), '2000000ms') # self.assertEqual(len(ssmlParseTree.children[1].children), 1) # self.assertIsInstance(ssmlParseTree.children[1].children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[1].children[0].getHeadText(), '6') # self.assertEqual(ssmlParseTree.children[1].children[0].getTailText(), '') # self.assertEqual(ssmlParseTree.children[1].children[0].getDuration(), '1000000ms') # self.assertIsInstance(ssmlParseTree.children[2], ProsodyElement) # self.assertEqual(ssmlParseTree.children[2].getHeadText(), '3') # self.assertEqual(ssmlParseTree.children[2].getTailText(), '4') # self.assertEqual(ssmlParseTree.children[2].getDuration(), '3000ms') # self.assertEqual(len(ssmlParseTree.children[2].children), 1) # self.assertIsInstance(ssmlParseTree.children[2].children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[2].children[0].getHeadText(), '') # self.assertEqual(ssmlParseTree.children[2].children[0].getTailText(), '5') # 
self.assertEqual(ssmlParseTree.children[2].children[0].getDuration(), '2000000ms') # self.assertEqual(len(ssmlParseTree.children[2].children), 1) # self.assertIsInstance(ssmlParseTree.children[2].children[0].children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getHeadText(), '6') # self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getTailText(), '') # self.assertEqual(ssmlParseTree.children[2].children[0].children[0].getDuration(), '1000000ms') # self.assertIsInstance(ssmlParseTree.children[3], ProsodyElement) # self.assertEqual(ssmlParseTree.children[3].getHeadText(), '') # self.assertEqual(ssmlParseTree.children[3].getTailText(), '') # self.assertEqual(ssmlParseTree.children[3].getDuration(), '5000ms') # self.assertEqual(len(ssmlParseTree.children[3].children), 1) # self.assertIsInstance(ssmlParseTree.children[3].children[0], ProsodyElement) # self.assertEqual(ssmlParseTree.children[3].children[0].getHeadText(), '') # self.assertEqual(ssmlParseTree.children[3].children[0].getTailText(), '') # self.assertEqual(ssmlParseTree.children[3].children[0].getDuration(), '9000000ms') # Test cases for prosody -> A lot (Might need a different function for each attribute) Only weird if there is nested resolution (not sure if we will impelement it yet, whatJacob is doing for emphasis). -> Assume we will be doing it since the custoemr asked us to do it # When the mocks are happenign you have to return mock objects # Convert previous janky xml into the new format # Update XML # Design XML documentation # For each node # Looks at child but if has emphasis fine # Look at next, possibly creates new node, reaches up to the parent, modifies the list of children, then leave, now it's the parents turn
49.972752
277
0.63615
3,688
36,680
6.312364
0.086768
0.216495
0.184021
0.231959
0.837715
0.81134
0.79884
0.785438
0.769588
0.763789
0
0.02523
0.231707
36,680
734
278
49.972752
0.800859
0.155643
0
0.715247
0
0.013453
0.042016
0.020152
0
0
0
0
0.46861
1
0.073991
false
0
0.029148
0.008969
0.20852
0.002242
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
11
3971fafbd8fda3a1a800ec7a93e6df0af7d4f9f8
246
py
Python
superorm/orm.py
lyoshur/superorm
16bd9492fe9b224e6798e1d2895121adf3fefdfb
[ "MIT" ]
null
null
null
superorm/orm.py
lyoshur/superorm
16bd9492fe9b224e6798e1d2895121adf3fefdfb
[ "MIT" ]
null
null
null
superorm/orm.py
lyoshur/superorm
16bd9492fe9b224e6798e1d2895121adf3fefdfb
[ "MIT" ]
null
null
null
# noinspection PyUnresolvedReferences from superorm.factory import SQLSessionFactoryBuild as Builder # noinspection PyUnresolvedReferences from superorm.mapper import parse4file as parse_config_from_file, parse4string as parse_config_from_string
49.2
106
0.894309
27
246
7.925926
0.592593
0.317757
0.35514
0.429907
0
0
0
0
0
0
0
0.008889
0.085366
246
4
107
61.5
0.942222
0.288618
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
3982e3b831910ca16f6b4ba751cb93feada5f06e
166
py
Python
comet/validator/__init__.py
shinybrar/Comet
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
[ "BSD-2-Clause" ]
15
2015-11-29T18:53:58.000Z
2022-03-09T15:47:30.000Z
comet/validator/__init__.py
shinybrar/Comet
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
[ "BSD-2-Clause" ]
29
2016-01-21T18:10:45.000Z
2021-10-01T16:41:12.000Z
comet/validator/__init__.py
shinybrar/Comet
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
[ "BSD-2-Clause" ]
11
2016-01-22T14:05:51.000Z
2022-03-09T17:49:56.000Z
# Comet VOEvent Broker. # VOEvent validation. from comet.validator.ivoid import * from comet.validator.previously_seen import * from comet.validator.schema import *
23.714286
45
0.801205
21
166
6.285714
0.52381
0.204545
0.409091
0.363636
0
0
0
0
0
0
0
0
0.120482
166
6
46
27.666667
0.90411
0.246988
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
3986e1c98d102a9f66e500a4896ee32721ae0bb8
3,118
py
Python
pyaz/iot/dps/linked_hub/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/iot/dps/linked_hub/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/iot/dps/linked_hub/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
1
2022-02-03T09:12:01.000Z
2022-02-03T09:12:01.000Z
from .... pyaz_utils import _call_az def list(dps_name, resource_group): ''' List all linked IoT hubs in an Azure IoT Hub device provisioning service. Required Parameters: - dps_name -- IoT Provisioning Service name - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>` ''' return _call_az("az iot dps linked-hub list", locals()) def show(dps_name, linked_hub, resource_group): ''' Show details of a linked IoT hub in an Azure IoT Hub device provisioning service. Required Parameters: - dps_name -- IoT Provisioning Service name - linked_hub -- Host name of linked IoT Hub. - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>` ''' return _call_az("az iot dps linked-hub show", locals()) def create(connection_string, dps_name, location, resource_group, allocation_weight=None, apply_allocation_policy=None, no_wait=None): ''' Create a linked IoT hub in an Azure IoT Hub device provisioning service. Required Parameters: - connection_string -- Connection string of the IoT hub. - dps_name -- IoT Provisioning Service name - location -- Location of the IoT hub. - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>` Optional Parameters: - allocation_weight -- Allocation weight of the IoT hub. - apply_allocation_policy -- A boolean indicating whether to apply allocation policy to the IoT hub. - no_wait -- Do not wait for the long-running operation to finish. ''' return _call_az("az iot dps linked-hub create", locals()) def update(dps_name, linked_hub, resource_group, allocation_weight=None, apply_allocation_policy=None, no_wait=None): ''' Update a linked IoT hub in an Azure IoT Hub device provisioning service. Required Parameters: - dps_name -- IoT Provisioning Service name - linked_hub -- Host name of linked IoT Hub. - resource_group -- Name of resource group. 
You can configure the default group using `az configure --defaults group=<name>` Optional Parameters: - allocation_weight -- Allocation weight of the IoT hub. - apply_allocation_policy -- A boolean indicating whether to apply allocation policy to the Iot hub. - no_wait -- Do not wait for the long-running operation to finish. ''' return _call_az("az iot dps linked-hub update", locals()) def delete(dps_name, linked_hub, resource_group, no_wait=None): ''' Update a linked IoT hub in an Azure IoT Hub device provisioning service. Required Parameters: - dps_name -- IoT Provisioning Service name - linked_hub -- Host name of linked IoT Hub. - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>` Optional Parameters: - no_wait -- Do not wait for the long-running operation to finish. ''' return _call_az("az iot dps linked-hub delete", locals())
41.573333
134
0.718409
441
3,118
4.945578
0.145125
0.049519
0.051353
0.02751
0.873453
0.873453
0.824392
0.824392
0.824392
0.824392
0
0
0.202373
3,118
74
135
42.135135
0.87696
0.6873
0
0
0
0
0.174807
0
0
0
0
0
0
1
0.454545
false
0
0.090909
0
1
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
8
39923941601e1e7799f98273e93619aa0ccd91a4
1,718
py
Python
retrieval/hybrid/hybrid.py
park-sungmoo/odqa_baseline_code
45954be766e5f987bef18e5b8a2e47f1508742cd
[ "Apache-2.0" ]
67
2021-05-12T15:54:28.000Z
2022-03-12T15:55:35.000Z
retrieval/hybrid/hybrid.py
park-sungmoo/odqa_baseline_code
45954be766e5f987bef18e5b8a2e47f1508742cd
[ "Apache-2.0" ]
71
2021-05-01T06:07:37.000Z
2022-01-28T16:54:46.000Z
retrieval/hybrid/hybrid.py
park-sungmoo/odqa_baseline_code
45954be766e5f987bef18e5b8a2e47f1508742cd
[ "Apache-2.0" ]
14
2021-05-24T10:57:27.000Z
2022-02-18T06:34:11.000Z
from retrieval.dense import DprBert from retrieval.hybrid import HybridRetrieval, HybridLogisticRetrieval from retrieval.sparse import TfidfRetrieval, ATIREBM25Retrieval class TfidfDprBert(HybridRetrieval): def __init__(self, args): super().__init__(args) temp = args.model.retriever_name args.model.retriever_name = "TFIDF" self.sparse_retriever = TfidfRetrieval(args) args.model.retriever_name = "DPRBERT" self.dense_retriever = DprBert(args) args.model.retriever_name = temp class AtireBm25DprBert(HybridRetrieval): def __init__(self, args): super().__init__(args) temp = args.model.retriever_name args.model.retriever_name = "ATIREBM25" self.sparse_retriever = ATIREBM25Retrieval(args) args.model.retriever_name = "DPRBERT" self.dense_retriever = DprBert(args) args.model.retriever_name = temp class LogisticTfidfDprBert(HybridLogisticRetrieval): def __init__(self, args): super().__init__(args) temp = args.model.retriever_name args.model.retriever_name = "TFIDF" self.sparse_retriever = ATIREBM25Retrieval(args) args.model.retriever_name = "DPRBERT" self.dense_retriever = DprBert(args) args.model.retriever_name = temp class LogisticAtireBm25DprBert(HybridLogisticRetrieval): def __init__(self, args): super().__init__(args) temp = args.model.retriever_name args.model.retriever_name = "ATIREBM25" self.sparse_retriever = ATIREBM25Retrieval(args) args.model.retriever_name = "DPRBERT" self.dense_retriever = DprBert(args) args.model.retriever_name = temp
30.678571
69
0.700233
177
1,718
6.480226
0.152542
0.125545
0.25109
0.306888
0.789015
0.789015
0.789015
0.789015
0.789015
0.789015
0
0.011834
0.213038
1,718
55
70
31.236364
0.836538
0
0
0.794872
0
0
0.032596
0
0
0
0
0
0
1
0.102564
false
0
0.076923
0
0.282051
0
0
0
0
null
0
1
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
f2fc5649e7cfdac88de77d7b03a7838ce24467b1
9,177
py
Python
forecasting/short_term_forecasting.py
Matrixeigs/energy_management_system
b2af6a3cfa71173f33d798e943f605d802aed19f
[ "MIT" ]
68
2017-11-21T02:49:11.000Z
2022-03-25T07:14:42.000Z
forecasting/short_term_forecasting.py
yifeili/energy_management_system
b2af6a3cfa71173f33d798e943f605d802aed19f
[ "MIT" ]
null
null
null
forecasting/short_term_forecasting.py
yifeili/energy_management_system
b2af6a3cfa71173f33d798e943f605d802aed19f
[ "MIT" ]
34
2017-11-21T02:52:15.000Z
2022-03-27T14:35:25.000Z
# Short_term forecasting for local energy management system # Include the pv forecasting, wp forecasting, # In this forecasting system, the tensor flow will be deployed and used. # The training from data_management.database_format import db_short_term_forecasting,one_minute_history_data import random from configuration.configuration_time_line import default_time from configuration.configuration_database import local_history_database from sqlalchemy import create_engine, and_ # Import database from sqlalchemy.orm import sessionmaker db_str = local_history_database["db_str"] engine = create_engine(db_str, echo=False) Session = sessionmaker(bind=engine) session_source = Session() def blank_forecasting_result(*args): Target_time = args[0] default_result = db_short_term_forecasting \ (TIME_STAMP=Target_time, AC_PD=0, AC_QD=0, UAC_PD=0, UAC_QD=0, DC_PD=0, UDC_PD=0, PV_PG=0, WP_PG=0, PRICE=0, ) return default_result def short_term_forecasting_pv(*args): # Short term forecasting for photovoltaic session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() PV_PG = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.PV_PG = PV_PG session.commit() return PV_PG def short_term_forecasting_wp(*args): # Short term forecasting for wind power session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() WP_PG = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.WP_PG = WP_PG session.commit() return WP_PG def short_term_forecasting_load_ac(*args): # Short term forecasting for critical AC load session = 
args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() AC_PD = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.AC_PD = AC_PD session.commit() return AC_PD def short_term_forecasting_load_uac(*args): # Short term forecasting for non-critical AC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() UAC_PD = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.UAC_PD = UAC_PD session.commit() return UAC_PD def short_term_forecasting_load_dc(*args): # Short term forecasting for critical DC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() DC_PD = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.DC_PD = DC_PD session.commit() return DC_PD def short_term_forecasting_load_udc(*args): # Short term forecasting for non-critical DC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() UDC_PD = random.random() row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.UDC_PD = UDC_PD session.commit() return UDC_PD def short_term_forecasting_pv_history(*args): # Short term forecasting for 
photovoltaic session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() PV_PG = row_source.PV_PG row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.PV_PG = PV_PG session.commit() return PV_PG def short_term_forecasting_wp_history(*args): # Short term forecasting for wind power session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() WP_PG = row_source.WP_PG row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.WP_PG = WP_PG session.commit() return WP_PG def short_term_forecasting_load_ac_history(*args): # Short term forecasting for critical AC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() AC_PD = row_source.AC_PD row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.AC_PD = AC_PD session.commit() return AC_PD def short_term_forecasting_load_uac_history(*args): # Short 
term forecasting for non-critical AC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() UAC_PD = row_source.NAC_PD row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.UAC_PD = UAC_PD session.commit() return UAC_PD def short_term_forecasting_load_dc_history(*args): # Short term forecasting for critical DC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() DC_PD= row_source.DC_PD row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.DC_PD = DC_PD session.commit() return DC_PD def short_term_forecasting_load_udc_history(*args): # Short term forecasting for non-critical DC load session = args[0] Target_Time = args[1] if session.query(db_short_term_forecasting).filter( db_short_term_forecasting.TIME_STAMP == Target_Time).count() == 0: blank_row = blank_forecasting_result(Target_Time) session.add(blank_row) session.commit() row_source = session_source.query(one_minute_history_data).filter_by( TIME_STAMP=int((Target_Time - default_time["Base_time"]) / default_time["Time_step_opf"])).first() UDC_PD = row_source.NDC_PD row = session.query(db_short_term_forecasting).filter_by(TIME_STAMP=Target_Time).first() row.UDC_PD = UDC_PD session.commit() return UDC_PD
33.25
106
0.708075
1,270
9,177
4.746457
0.066929
0.094061
0.209025
0.138686
0.868447
0.860153
0.85783
0.855508
0.846384
0.846384
0
0.006247
0.197668
9,177
276
107
33.25
0.812441
0.079002
0
0.75
0
0
0.01636
0
0
0
0
0
0
1
0.067708
false
0
0.03125
0
0.166667
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
84024d95fc49552c36c741ab96fc8ccf3b7eb6d7
162
py
Python
test/test_cards_shuffle.py
erichaase/topcoder-python
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
[ "MIT" ]
1
2017-03-25T17:40:57.000Z
2017-03-25T17:40:57.000Z
test/test_cards_shuffle.py
erichaase/topcoder-python
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
[ "MIT" ]
null
null
null
test/test_cards_shuffle.py
erichaase/topcoder-python
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
[ "MIT" ]
null
null
null
from test.assert_json import assert_json from topcoder.cards_shuffle import solution def test_cards_shuffle (): assert_json('cards_shuffle', solution)
27
46
0.790123
22
162
5.5
0.454545
0.247934
0
0
0
0
0
0
0
0
0
0
0.148148
162
5
47
32.4
0.876812
0
0
0
0
0
0.080247
0
0
0
0
0
0.5
1
0.25
true
0
0.5
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
1
0
0
0
0
7
840a7efa9c03752d0b4d50a2471082cf4199581e
2,088
py
Python
tests/test_defect_species.py
j-m-dean/pyscses
6c2875cb87a8f91ae7aed382922c34b0e611ba85
[ "MIT" ]
null
null
null
tests/test_defect_species.py
j-m-dean/pyscses
6c2875cb87a8f91ae7aed382922c34b0e611ba85
[ "MIT" ]
null
null
null
tests/test_defect_species.py
j-m-dean/pyscses
6c2875cb87a8f91ae7aed382922c34b0e611ba85
[ "MIT" ]
null
null
null
import unittest from pyscses.defect_species import DefectSpecies class TestDefectSpecies(unittest.TestCase): def test_init(self): defect_species = DefectSpecies(label='VO', valence=+2.0, mole_fraction=0.1, mobility=0.1, fixed=True) self.assertEqual(defect_species.label, 'VO') self.assertEqual(defect_species.valence, 2.0) self.assertEqual(defect_species.mole_fraction, 0.1) self.assertEqual(defect_species.mobility, 0.1) self.assertEqual(defect_species.fixed, True) def test_init_defaults(self): defect_species = DefectSpecies(label='VO', valence=+2.0, mole_fraction=0.1) self.assertEqual(defect_species.label, 'VO') self.assertEqual(defect_species.valence, 2.0) self.assertEqual(defect_species.mole_fraction, 0.1) self.assertEqual(defect_species.mobility, 0.0) self.assertEqual(defect_species.fixed, False) def test_init_raises_TypeError_if_label_is_incorrect_type(self): with self.assertRaises(TypeError): defect_species = DefectSpecies(label=3.0, valence=+2.0, mole_fraction=0.1) def test_init_raises_TypeError_if_valence_is_incorrect_type(self): with self.assertRaises(TypeError): defect_species = DefectSpecies(label='VO', valence='foo', mole_fraction=0.1) def test_init_raises_TypeError_if_mole_fraction_is_incorrect_type(self): with self.assertRaises(TypeError): defect_species = DefectSpecies(label='VO', valence=+2.0, mole_fraction='foo') if __name__ == '__main__': unittest.main()
43.5
76
0.548851
204
2,088
5.328431
0.196078
0.191352
0.193192
0.25759
0.809568
0.773689
0.721251
0.712971
0.712971
0.712971
0
0.024316
0.369732
2,088
47
77
44.425532
0.801672
0
0
0.5
0
0
0.012452
0
0
0
0
0
0.325
1
0.125
false
0
0.05
0
0.2
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
842f0ca14bf93481d51627035a2494301ee56627
145
py
Python
src/timer/model/__init__.py
jakob-bagterp/timer-for-python
a48b60c8782bbf6d368d6ca2be249054c3b66c21
[ "MIT" ]
2
2022-03-22T11:14:37.000Z
2022-03-24T14:27:13.000Z
src/timer/model/__init__.py
jakob-bagterp/timer-for-python
a48b60c8782bbf6d368d6ca2be249054c3b66c21
[ "MIT" ]
null
null
null
src/timer/model/__init__.py
jakob-bagterp/timer-for-python
a48b60c8782bbf6d368d6ca2be249054c3b66c21
[ "MIT" ]
null
null
null
__all__ = ["elapsed_time_fractions", "thread_item", "timer", "timer_base"] from . import elapsed_time_fractions, thread_item, timer, timer_base
36.25
74
0.77931
19
145
5.315789
0.526316
0.217822
0.39604
0.514851
0.871287
0.871287
0.871287
0.871287
0
0
0
0
0.096552
145
3
75
48.333333
0.770992
0
0
0
0
0
0.331034
0.151724
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
1
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
10
845213d1bdfb6080582887b30e7cba96f7201a93
19,931
py
Python
billforward/apis/metadata_api.py
billforward/bf-python
d2b812329ca3ed1fd94364d7f46f69ad74665596
[ "Apache-2.0" ]
2
2016-11-23T17:32:37.000Z
2022-02-24T05:13:20.000Z
billforward/apis/metadata_api.py
billforward/bf-python
d2b812329ca3ed1fd94364d7f46f69ad74665596
[ "Apache-2.0" ]
null
null
null
billforward/apis/metadata_api.py
billforward/bf-python
d2b812329ca3ed1fd94364d7f46f69ad74665596
[ "Apache-2.0" ]
1
2016-12-30T20:02:48.000Z
2016-12-30T20:02:48.000Z
# coding: utf-8 """ BillForward REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class MetadataApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def delete_metadata_key_values(self, **kwargs): """ Remove any associated metadata. {\"nickname\":\"Clear metadata from organization\",\"request\" :\"deleteOrganizationMetadataRequest.html\",\"response\":\"deleteOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_metadata_key_values(callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_metadata_key_values_with_http_info(**kwargs) else: (data) = self.delete_metadata_key_values_with_http_info(**kwargs) return data def delete_metadata_key_values_with_http_info(self, **kwargs): """ Remove any associated metadata. {\"nickname\":\"Clear metadata from organization\",\"request\" :\"deleteOrganizationMetadataRequest.html\",\"response\":\"deleteOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_metadata_key_values_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. 
""" all_params = ['organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_metadata_key_values" % key ) params[key] = val del params['kwargs'] resource_path = '/metadata'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DynamicMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_metadata_key_values(self, **kwargs): """ Retrieve any associated metadata. {\"nickname\":\"Retrieve metadata on organization\",\"request\":\"getOrganizationMetadataRequest.html\",\"response\":\"getOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_metadata_key_values(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. 
:return: DynamicMetadata If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_metadata_key_values_with_http_info(**kwargs) else: (data) = self.get_metadata_key_values_with_http_info(**kwargs) return data def get_metadata_key_values_with_http_info(self, **kwargs): """ Retrieve any associated metadata. {\"nickname\":\"Retrieve metadata on organization\",\"request\":\"getOrganizationMetadataRequest.html\",\"response\":\"getOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_metadata_key_values_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. 
""" all_params = ['organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_metadata_key_values" % key ) params[key] = val del params['kwargs'] resource_path = '/metadata'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DynamicMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def set_metadata_key_values(self, metadata, **kwargs): """ Remove any existing metadata keys and create the provided data. {\"nickname\":\"Set metadata on organization\",\"request\":\"setOrganizationMetadataRequest.html\",\"response\":\"setOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.set_metadata_key_values(metadata, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param DynamicMetadata metadata: (required) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.set_metadata_key_values_with_http_info(metadata, **kwargs) else: (data) = self.set_metadata_key_values_with_http_info(metadata, **kwargs) return data def set_metadata_key_values_with_http_info(self, metadata, **kwargs): """ Remove any existing metadata keys and create the provided data. {\"nickname\":\"Set metadata on organization\",\"request\":\"setOrganizationMetadataRequest.html\",\"response\":\"setOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.set_metadata_key_values_with_http_info(metadata, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param DynamicMetadata metadata: (required) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. 
""" all_params = ['metadata', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method set_metadata_key_values" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'metadata' is set if ('metadata' not in params) or (params['metadata'] is None): raise ValueError("Missing the required parameter `metadata` when calling `set_metadata_key_values`") resource_path = '/metadata'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'metadata' in params: body_params = params['metadata'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DynamicMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def upsert_metadata_key_values(self, metadata, **kwargs): """ Update any existing metadata key-values and insert any new key-values, no keys will be removed. {\"nickname\":\"Upsert metadata on organization\",\"request\":\"upsertOrganizationMetadataRequest.html\",\"response\":\"upsertOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upsert_metadata_key_values(metadata, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param DynamicMetadata metadata: (required) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.upsert_metadata_key_values_with_http_info(metadata, **kwargs) else: (data) = self.upsert_metadata_key_values_with_http_info(metadata, **kwargs) return data def upsert_metadata_key_values_with_http_info(self, metadata, **kwargs): """ Update any existing metadata key-values and insert any new key-values, no keys will be removed. {\"nickname\":\"Upsert metadata on organization\",\"request\":\"upsertOrganizationMetadataRequest.html\",\"response\":\"upsertOrganizationMetadataResponse.html\"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upsert_metadata_key_values_with_http_info(metadata, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param DynamicMetadata metadata: (required) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :return: DynamicMetadata If the method is called asynchronously, returns the request thread. 
""" all_params = ['metadata', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method upsert_metadata_key_values" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'metadata' is set if ('metadata' not in params) or (params['metadata'] is None): raise ValueError("Missing the required parameter `metadata` when calling `upsert_metadata_key_values`") resource_path = '/metadata'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'metadata' in params: body_params = params['metadata'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DynamicMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'))
42.406383
172
0.59144
2,004
19,931
5.685629
0.111776
0.05617
0.047744
0.025276
0.919782
0.910743
0.907758
0.899509
0.894418
0.887748
0
0.000742
0.323817
19,931
469
173
42.496802
0.844698
0.404194
0
0.787736
1
0
0.146939
0.039266
0
0
0
0
0
1
0.042453
false
0
0.033019
0
0.136792
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
082685a2d069cfadc36fb74db620cbb296fe71de
50,718
py
Python
nnunet/network_architecture/segnet.py
NabJa/nnUNet
f017003523f5619d5a4165575c8338bbb8733628
[ "Apache-2.0" ]
null
null
null
nnunet/network_architecture/segnet.py
NabJa/nnUNet
f017003523f5619d5a4165575c8338bbb8733628
[ "Apache-2.0" ]
null
null
null
nnunet/network_architecture/segnet.py
NabJa/nnUNet
f017003523f5619d5a4165575c8338bbb8733628
[ "Apache-2.0" ]
null
null
null
from copy import deepcopy
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
import numpy as np
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
import torch.nn.functional
from nnunet.network_architecture.generic_UNet import (
    ConvDropoutNonlinNorm,
    StackedConvLayers,
    Upsample,
)


class SegNet(SegmentationNetwork):
    """SegNet-style variant of the nnU-Net generic U-Net.

    Differences to the generic U-Net (NJ changes):
      1. Encoder pooling layers are built with ``return_indices=True`` so the
         argmax locations of each max-pool are recorded in ``forward``.
      2. Decoder upsampling is done with max-unpooling (``nn.MaxUnpool2d/3d``)
         driven by those recorded indices instead of transposed convolutions
         or interpolation.

    The constructor signature is identical to ``Generic_UNet`` so the class is
    a drop-in replacement.
    """

    DEFAULT_BATCH_SIZE_3D = 2
    DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
    SPACING_FACTOR_BETWEEN_STAGES = 2
    BASE_NUM_FEATURES_3D = 30
    MAX_NUMPOOL_3D = 999
    MAX_NUM_FILTERS_3D = 320

    DEFAULT_PATCH_SIZE_2D = (256, 256)
    BASE_NUM_FEATURES_2D = 30
    DEFAULT_BATCH_SIZE_2D = 50
    MAX_NUMPOOL_2D = 999
    MAX_FILTERS_2D = 480

    use_this_for_batch_size_computation_2D = 19739648
    use_this_for_batch_size_computation_3D = 520000000  # 505789440

    def __init__(
        self,
        input_channels,
        base_num_features,
        num_classes,
        num_pool,
        num_conv_per_stage=2,
        feat_map_mul_on_downscale=2,
        conv_op=nn.Conv3d,
        norm_op=nn.BatchNorm3d,
        norm_op_kwargs=None,
        dropout_op=nn.Dropout3d,
        dropout_op_kwargs=None,
        nonlin=nn.LeakyReLU,
        nonlin_kwargs=None,
        deep_supervision=True,
        dropout_in_localization=False,
        final_nonlin=softmax_helper,
        weightInitializer=InitWeights_He(1e-2),
        pool_op_kernel_sizes=None,
        conv_kernel_sizes=None,
        upscale_logits=False,
        convolutional_pooling=False,
        convolutional_upsampling=False,
        max_num_features=None,
        basic_block=ConvDropoutNonlinNorm,
        seg_output_use_bias=False,
    ):
        """Build encoder, bottleneck, decoder and segmentation heads.

        :param input_channels: number of input modalities/channels
        :param base_num_features: feature maps in the first encoder stage
        :param num_classes: number of segmentation output classes
        :param num_pool: number of pooling (and unpooling) stages
        :param deep_supervision: if True, ``forward`` returns one output per
            decoder resolution (highest resolution first)
        Remaining parameters mirror ``Generic_UNet``. Questions?
        -> f.isensee@dkfz.de
        """
        super(SegNet, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        if nonlin_kwargs is None:
            nonlin_kwargs = {"negative_slope": 1e-2, "inplace": True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {"p": 0.5, "inplace": True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {"eps": 1e-5, "affine": True, "momentum": 0.1}

        self.conv_kwargs = {"stride": 1, "dilation": 1, "bias": True}

        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision

        # Select all dimension-dependent ops together. BUGFIX: the unpool op
        # must match the pool op's dimensionality; previously nn.MaxUnpool3d
        # was hard-coded in the decoder, breaking every 2D configuration.
        if conv_op == nn.Conv2d:
            upsample_mode = "bilinear"
            pool_op = nn.MaxPool2d
            unpool_op = nn.MaxUnpool2d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
        elif conv_op == nn.Conv3d:
            upsample_mode = "trilinear"
            pool_op = nn.MaxPool3d
            unpool_op = nn.MaxUnpool3d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
        else:
            raise ValueError(
                "unknown convolution dimensionality, conv op: %s" % str(conv_op)
            )

        self.input_shape_must_be_divisible_by = np.prod(
            pool_op_kernel_sizes, 0, dtype=np.int64
        )
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes

        # 'same' padding: pad 1 for kernel size 3, pad 0 for kernel size 1
        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])

        if max_num_features is None:
            if self.conv_op == nn.Conv3d:
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features

        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.transpose_down = []
        self.down_idx = []
        self.transpose_up = []
        self.seg_outputs = []

        output_features = base_num_features
        input_features = input_channels

        #############################################
        #                 ENCODER                   #
        #############################################
        for npool in range(num_pool):
            # first conv of a stage strides only when pooling is convolutional
            if npool != 0 and self.convolutional_pooling:
                first_stride = pool_op_kernel_sizes[npool - 1]
            else:
                first_stride = None

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[npool]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[npool]
            # add convolutions
            self.conv_blocks_context.append(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                )
            )
            if not self.convolutional_pooling:
                # NJ CHANGE 1: pooling records argmax indices for later unpooling
                tdown = pool_op(pool_op_kernel_sizes[npool], return_indices=True)
                self.transpose_down.append(tdown)
            input_features = output_features
            output_features = int(np.round(output_features * feat_map_mul_on_downscale))
            output_features = min(output_features, self.max_num_features)

        #############################################
        #                BOTTLENECK                 #
        #############################################
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[-1]
        else:
            first_stride = None

        # the output of the last conv must match the number of features from
        # the skip connection if we are not using convolutional upsampling;
        # with convolutional upsampling the transposed conv does the reduction
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[-1].output_channels

        self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs["padding"] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(
            nn.Sequential(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage - 1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                ),
                StackedConvLayers(
                    output_features,
                    final_num_features,
                    1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    basic_block=basic_block,
                ),
            )
        )

        # disable dropout in the localization pathway unless requested
        if not dropout_in_localization:
            old_dropout_p = self.dropout_op_kwargs["p"]
            self.dropout_op_kwargs["p"] = 0.0

        #############################################
        #                 DECODER                   #
        #############################################
        for npool in range(num_pool):
            # conv_blocks_context[-1] is the bottleneck, so skips start at -2
            nfeatures_from_skip = self.conv_blocks_context[
                -(2 + npool)
            ].output_channels
            n_features_after_tu_and_concat = nfeatures_from_skip * 2

            # the first conv reduces the number of features to match the skip;
            # the final conv reduces again unless convolutional upsampling is used
            if npool != num_pool - 1 and not self.convolutional_upsampling:
                final_num_features = self.conv_blocks_context[
                    -(3 + npool)
                ].output_channels
            else:
                final_num_features = nfeatures_from_skip

            # NJ CHANGE 2: upsampling is done via max-unpooling with the
            # indices recorded by the matching encoder pooling layer
            this_pool_op_kernel_size = pool_op_kernel_sizes[-(npool + 1)]
            self.transpose_up.append(
                unpool_op(this_pool_op_kernel_size, this_pool_op_kernel_size)
            )

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[-(npool + 1)]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[-(npool + 1)]
            self.conv_blocks_localization.append(
                nn.Sequential(
                    StackedConvLayers(
                        n_features_after_tu_and_concat,
                        nfeatures_from_skip,
                        num_conv_per_stage - 1,
                        self.conv_op,
                        self.conv_kwargs,
                        self.norm_op,
                        self.norm_op_kwargs,
                        self.dropout_op,
                        self.dropout_op_kwargs,
                        self.nonlin,
                        self.nonlin_kwargs,
                        basic_block=basic_block,
                    ),
                    StackedConvLayers(
                        nfeatures_from_skip,
                        final_num_features,
                        1,
                        self.conv_op,
                        self.conv_kwargs,
                        self.norm_op,
                        self.norm_op_kwargs,
                        self.dropout_op,
                        self.dropout_op_kwargs,
                        self.nonlin,
                        self.nonlin_kwargs,
                        basic_block=basic_block,
                    ),
                )
            )

        # one 1x1(x1) segmentation head per decoder resolution
        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(
                conv_op(
                    self.conv_blocks_localization[ds][-1].output_channels,
                    num_classes,
                    1,
                    1,
                    0,
                    1,
                    1,
                    seg_output_use_bias,
                )
            )

        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
        for usl in range(num_pool - 1):
            if self.upscale_logits:
                self.upscale_logits_ops.append(
                    Upsample(
                        scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                        mode=upsample_mode,
                    )
                )
            else:
                self.upscale_logits_ops.append(lambda x: x)

        if not dropout_in_localization:
            self.dropout_op_kwargs["p"] = old_dropout_p

        # register all modules properly
        self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.transpose_down = nn.ModuleList(self.transpose_down)
        self.transpose_up = nn.ModuleList(self.transpose_up)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            # lambda x: x is not a Module so we need to distinguish here
            self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops)
        if self.weightInitializer is not None:
            self.apply(self.weightInitializer)

    def forward(self, x):
        """Encoder -> bottleneck -> unpooling decoder with skip concatenation.

        Returns a tuple of per-resolution outputs when deep supervision is
        active, otherwise only the full-resolution output.
        """
        skips = []
        indicis = []  # NJ: argmax indices from nn.MaxPool*(..., return_indices=True)
        seg_outputs = []
        for d in range(len(self.conv_blocks_context) - 1):
            x = self.conv_blocks_context[d](x)
            skips.append(x)
            if not self.convolutional_pooling:
                x, index = self.transpose_down[d](x)
                indicis.append(index)  # NJ: save indices for every pooling step

        x = self.conv_blocks_context[-1](x)

        for u in range(len(self.transpose_up)):
            # NJ: feed the matching pooling indices to the unpool layer
            x = self.transpose_up[u](x, indicis[-(u + 1)])
            x = torch.cat((x, skips[-(u + 1)]), dim=1)
            x = self.conv_blocks_localization[u](x)
            seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))

        if self._deep_supervision and self.do_ds:
            return tuple(
                [seg_outputs[-1]]
                + [
                    i(j)
                    for i, j in zip(
                        list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1]
                    )
                ]
            )
        else:
            return seg_outputs[-1]

    @staticmethod
    def compute_approx_vram_consumption(
        patch_size,
        num_pool_per_axis,
        base_num_features,
        max_num_features,
        num_modalities,
        num_classes,
        pool_op_kernel_sizes,
        deep_supervision=False,
        conv_per_stage=2,
    ):
        """Constant term approximately proportional to VRAM consumption.

        This only applies for num_conv_per_stage and
        convolutional_upsampling=True. Not real VRAM consumption -- just a
        constant term to which the consumption will be approx proportional
        (+ offset for parameter storage).

        :param deep_supervision: whether per-resolution outputs are stored
        :param patch_size: spatial input size
        :param num_pool_per_axis: pooling count per spatial axis
        :param base_num_features: features of the first stage
        :param max_num_features: feature-map cap
        :param num_modalities: input channels
        :param num_classes: output classes
        :param pool_op_kernel_sizes: per-stage pooling kernels
        :return: integer proxy for memory consumption
        """
        if not isinstance(num_pool_per_axis, np.ndarray):
            num_pool_per_axis = np.array(num_pool_per_axis)

        npool = len(pool_op_kernel_sizes)

        map_size = np.array(patch_size)
        tmp = np.int64(
            (conv_per_stage * 2 + 1)
            * np.prod(map_size, dtype=np.int64)
            * base_num_features
            + num_modalities * np.prod(map_size, dtype=np.int64)
            + num_classes * np.prod(map_size, dtype=np.int64)
        )

        num_feat = base_num_features

        for p in range(npool):
            for pi in range(len(num_pool_per_axis)):
                map_size[pi] /= pool_op_kernel_sizes[p][pi]
            num_feat = min(num_feat * 2, max_num_features)
            # conv_per_stage for encoder + decoder convs and 1 for transposed conv
            num_blocks = (
                (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage
            )
            tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
            if deep_supervision and p < (npool - 2):
                tmp += np.prod(map_size, dtype=np.int64) * num_classes

        return tmp


class SmallSegNet(SegmentationNetwork):
    """Lightweight SegNet variant.

    Like :class:`SegNet` but the decoder does NOT concatenate skip
    connections and uses only a single StackedConvLayers block per stage,
    so each localization stage sees ``nfeatures_from_skip`` input channels.
    """

    DEFAULT_BATCH_SIZE_3D = 2
    DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
    SPACING_FACTOR_BETWEEN_STAGES = 2
    BASE_NUM_FEATURES_3D = 30
    MAX_NUMPOOL_3D = 999
    MAX_NUM_FILTERS_3D = 320

    DEFAULT_PATCH_SIZE_2D = (256, 256)
    BASE_NUM_FEATURES_2D = 30
    DEFAULT_BATCH_SIZE_2D = 50
    MAX_NUMPOOL_2D = 999
    MAX_FILTERS_2D = 480

    use_this_for_batch_size_computation_2D = 19739648
    use_this_for_batch_size_computation_3D = 520000000  # 505789440

    def __init__(
        self,
        input_channels,
        base_num_features,
        num_classes,
        num_pool,
        num_conv_per_stage=2,
        feat_map_mul_on_downscale=2,
        conv_op=nn.Conv3d,
        norm_op=nn.BatchNorm3d,
        norm_op_kwargs=None,
        dropout_op=nn.Dropout3d,
        dropout_op_kwargs=None,
        nonlin=nn.LeakyReLU,
        nonlin_kwargs=None,
        deep_supervision=True,
        dropout_in_localization=False,
        final_nonlin=softmax_helper,
        weightInitializer=InitWeights_He(1e-2),
        pool_op_kernel_sizes=None,
        conv_kernel_sizes=None,
        upscale_logits=False,
        convolutional_pooling=False,
        convolutional_upsampling=False,
        max_num_features=None,
        basic_block=ConvDropoutNonlinNorm,
        seg_output_use_bias=False,
    ):
        """Build encoder, bottleneck and the reduced (skip-free) decoder.

        Parameters are identical to :class:`SegNet`. Questions?
        -> f.isensee@dkfz.de
        """
        super(SmallSegNet, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        if nonlin_kwargs is None:
            nonlin_kwargs = {"negative_slope": 1e-2, "inplace": True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {"p": 0.5, "inplace": True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {"eps": 1e-5, "affine": True, "momentum": 0.1}

        self.conv_kwargs = {"stride": 1, "dilation": 1, "bias": True}

        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision

        # Dimension-dependent ops. BUGFIX: unpool op now follows the
        # convolution dimensionality (previously nn.MaxUnpool3d was
        # hard-coded, breaking 2D configurations).
        if conv_op == nn.Conv2d:
            upsample_mode = "bilinear"
            pool_op = nn.MaxPool2d
            unpool_op = nn.MaxUnpool2d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
        elif conv_op == nn.Conv3d:
            upsample_mode = "trilinear"
            pool_op = nn.MaxPool3d
            unpool_op = nn.MaxUnpool3d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
        else:
            raise ValueError(
                "unknown convolution dimensionality, conv op: %s" % str(conv_op)
            )

        self.input_shape_must_be_divisible_by = np.prod(
            pool_op_kernel_sizes, 0, dtype=np.int64
        )
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes

        # 'same' padding: pad 1 for kernel size 3, pad 0 for kernel size 1
        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])

        if max_num_features is None:
            if self.conv_op == nn.Conv3d:
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features

        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.transpose_down = []
        self.down_idx = []
        self.transpose_up = []
        self.seg_outputs = []

        output_features = base_num_features
        input_features = input_channels

        #############################################
        #                 ENCODER                   #
        #############################################
        for npool in range(num_pool):
            if npool != 0 and self.convolutional_pooling:
                first_stride = pool_op_kernel_sizes[npool - 1]
            else:
                first_stride = None

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[npool]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[npool]
            # add convolutions
            self.conv_blocks_context.append(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                )
            )
            if not self.convolutional_pooling:
                # NJ CHANGE 1: pooling records argmax indices for later unpooling
                tdown = pool_op(pool_op_kernel_sizes[npool], return_indices=True)
                self.transpose_down.append(tdown)
            input_features = output_features
            output_features = int(np.round(output_features * feat_map_mul_on_downscale))
            output_features = min(output_features, self.max_num_features)

        #############################################
        #                BOTTLENECK                 #
        #############################################
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[-1]
        else:
            first_stride = None

        # output of the last conv must match the features of the skip level
        # unless convolutional upsampling handles the reduction
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[-1].output_channels

        self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs["padding"] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(
            nn.Sequential(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage - 1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                ),
                StackedConvLayers(
                    output_features,
                    final_num_features,
                    1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    basic_block=basic_block,
                ),
            )
        )

        # disable dropout in the localization pathway unless requested
        if not dropout_in_localization:
            old_dropout_p = self.dropout_op_kwargs["p"]
            self.dropout_op_kwargs["p"] = 0.0

        #############################################
        #                 DECODER                   #
        #############################################
        for npool in range(num_pool):
            # conv_blocks_context[-1] is the bottleneck, so skips start at -2
            nfeatures_from_skip = self.conv_blocks_context[
                -(2 + npool)
            ].output_channels

            if npool != num_pool - 1 and not self.convolutional_upsampling:
                final_num_features = self.conv_blocks_context[
                    -(3 + npool)
                ].output_channels
            else:
                final_num_features = nfeatures_from_skip

            # NJ CHANGE 2: upsampling is done via max-unpooling
            this_pool_op_kernel_size = pool_op_kernel_sizes[-(npool + 1)]
            self.transpose_up.append(
                unpool_op(this_pool_op_kernel_size, this_pool_op_kernel_size)
            )

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[-(npool + 1)]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[-(npool + 1)]
            # NJ: SmallSegNet uses only nfeatures_from_skip (no skip concat,
            # single conv block per decoder stage)
            self.conv_blocks_localization.append(
                StackedConvLayers(
                    nfeatures_from_skip,
                    final_num_features,
                    1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    basic_block=basic_block,
                ),
            )

        # one 1x1(x1) segmentation head per decoder resolution
        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(
                conv_op(
                    self.conv_blocks_localization[ds].output_channels,
                    num_classes,
                    1,
                    1,
                    0,
                    1,
                    1,
                    seg_output_use_bias,
                )
            )

        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
        for usl in range(num_pool - 1):
            if self.upscale_logits:
                self.upscale_logits_ops.append(
                    Upsample(
                        scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                        mode=upsample_mode,
                    )
                )
            else:
                self.upscale_logits_ops.append(lambda x: x)

        if not dropout_in_localization:
            self.dropout_op_kwargs["p"] = old_dropout_p

        # register all modules properly
        self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.transpose_down = nn.ModuleList(self.transpose_down)
        self.transpose_up = nn.ModuleList(self.transpose_up)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            # lambda x: x is not a Module so we need to distinguish here
            self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops)
        if self.weightInitializer is not None:
            self.apply(self.weightInitializer)

    def forward(self, x):
        """Encoder -> bottleneck -> unpooling decoder WITHOUT skip concat."""
        skips = []
        indicis = []  # NJ: argmax indices from nn.MaxPool*(..., return_indices=True)
        seg_outputs = []
        for d in range(len(self.conv_blocks_context) - 1):
            x = self.conv_blocks_context[d](x)
            skips.append(x)
            if not self.convolutional_pooling:
                x, index = self.transpose_down[d](x)
                indicis.append(index)  # NJ: save indices for every pooling step

        x = self.conv_blocks_context[-1](x)

        for u in range(len(self.transpose_up)):
            # NJ: feed the matching pooling indices to the unpool layer
            x = self.transpose_up[u](x, indicis[-(u + 1)])
            x = self.conv_blocks_localization[u](x)
            seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))

        if self._deep_supervision and self.do_ds:
            return tuple(
                [seg_outputs[-1]]
                + [
                    i(j)
                    for i, j in zip(
                        list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1]
                    )
                ]
            )
        else:
            return seg_outputs[-1]

    @staticmethod
    def compute_approx_vram_consumption(
        patch_size,
        num_pool_per_axis,
        base_num_features,
        max_num_features,
        num_modalities,
        num_classes,
        pool_op_kernel_sizes,
        deep_supervision=False,
        conv_per_stage=2,
    ):
        """Constant term approximately proportional to VRAM consumption.

        This only applies for num_conv_per_stage and
        convolutional_upsampling=True. Not real VRAM consumption -- see
        :meth:`SegNet.compute_approx_vram_consumption` for parameter details.
        """
        if not isinstance(num_pool_per_axis, np.ndarray):
            num_pool_per_axis = np.array(num_pool_per_axis)

        npool = len(pool_op_kernel_sizes)

        map_size = np.array(patch_size)
        tmp = np.int64(
            (conv_per_stage * 2 + 1)
            * np.prod(map_size, dtype=np.int64)
            * base_num_features
            + num_modalities * np.prod(map_size, dtype=np.int64)
            + num_classes * np.prod(map_size, dtype=np.int64)
        )

        num_feat = base_num_features

        for p in range(npool):
            for pi in range(len(num_pool_per_axis)):
                map_size[pi] /= pool_op_kernel_sizes[p][pi]
            num_feat = min(num_feat * 2, max_num_features)
            # conv_per_stage for encoder + decoder convs and 1 for transposed conv
            num_blocks = (
                (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage
            )
            tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
            if deep_supervision and p < (npool - 2):
                tmp += np.prod(map_size, dtype=np.int64) * num_classes

        return tmp


class SegNetNPool(SegmentationNetwork):
    """SegNet variant with only a limited number of unpoolings.

    Decoder stages listed in ``unpool_on_layers`` use max-unpooling with the
    recorded encoder indices; all other stages fall back to interpolation
    (:class:`Upsample`). TODO integrate in original SegNet!
    """

    DEFAULT_BATCH_SIZE_3D = 2
    DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
    SPACING_FACTOR_BETWEEN_STAGES = 2
    BASE_NUM_FEATURES_3D = 30
    MAX_NUMPOOL_3D = 999
    MAX_NUM_FILTERS_3D = 320

    DEFAULT_PATCH_SIZE_2D = (256, 256)
    BASE_NUM_FEATURES_2D = 30
    DEFAULT_BATCH_SIZE_2D = 50
    MAX_NUMPOOL_2D = 999
    MAX_FILTERS_2D = 480

    use_this_for_batch_size_computation_2D = 19739648
    use_this_for_batch_size_computation_3D = 520000000  # 505789440

    def __init__(
        self,
        input_channels,
        base_num_features,
        num_classes,
        num_pool,
        num_conv_per_stage=2,
        feat_map_mul_on_downscale=2,
        conv_op=nn.Conv3d,
        norm_op=nn.BatchNorm3d,
        norm_op_kwargs=None,
        dropout_op=nn.Dropout3d,
        dropout_op_kwargs=None,
        nonlin=nn.LeakyReLU,
        nonlin_kwargs=None,
        deep_supervision=True,
        dropout_in_localization=False,
        final_nonlin=softmax_helper,
        weightInitializer=InitWeights_He(1e-2),
        pool_op_kernel_sizes=None,
        conv_kernel_sizes=None,
        upscale_logits=False,
        convolutional_pooling=False,
        convolutional_upsampling=False,
        max_num_features=None,
        basic_block=ConvDropoutNonlinNorm,
        seg_output_use_bias=False,
        unpool_on_layers=None,
    ):
        """Build the network.

        :param unpool_on_layers: iterable of decoder stage indices (0-based,
            deepest stage first) that should use max-unpooling; all stages
            when None. Remaining parameters as in :class:`SegNet`. Questions?
            -> f.isensee@dkfz.de
        """
        super(SegNetNPool, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        if nonlin_kwargs is None:
            nonlin_kwargs = {"negative_slope": 1e-2, "inplace": True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {"p": 0.5, "inplace": True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {"eps": 1e-5, "affine": True, "momentum": 0.1}

        self.conv_kwargs = {"stride": 1, "dilation": 1, "bias": True}

        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision

        # Dimension-dependent ops. BUGFIX: unpool op now follows the
        # convolution dimensionality (previously nn.MaxUnpool3d was
        # hard-coded, breaking 2D configurations).
        if conv_op == nn.Conv2d:
            upsample_mode = "bilinear"
            pool_op = nn.MaxPool2d
            unpool_op = nn.MaxUnpool2d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
        elif conv_op == nn.Conv3d:
            upsample_mode = "trilinear"
            pool_op = nn.MaxPool3d
            unpool_op = nn.MaxUnpool3d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
        else:
            raise ValueError(
                "unknown convolution dimensionality, conv op: %s" % str(conv_op)
            )

        self.input_shape_must_be_divisible_by = np.prod(
            pool_op_kernel_sizes, 0, dtype=np.int64
        )
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes

        # 'same' padding: pad 1 for kernel size 3, pad 0 for kernel size 1
        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])

        if max_num_features is None:
            if self.conv_op == nn.Conv3d:
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features

        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.transpose_down = []
        self.down_idx = []
        self.transpose_up = []
        self.seg_outputs = []

        output_features = base_num_features
        input_features = input_channels

        # default: unpool on every decoder stage
        if unpool_on_layers is not None:
            self.unpool_on_layers = unpool_on_layers
        else:
            self.unpool_on_layers = list(range(num_pool))

        #############################################
        #                 ENCODER                   #
        #############################################
        for npool in range(num_pool):
            if npool != 0 and self.convolutional_pooling:
                first_stride = pool_op_kernel_sizes[npool - 1]
            else:
                first_stride = None

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[npool]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[npool]
            # add convolutions
            self.conv_blocks_context.append(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                )
            )
            if not self.convolutional_pooling:
                # NJ CHANGE 1: pooling records argmax indices for later unpooling
                tdown = pool_op(pool_op_kernel_sizes[npool], return_indices=True)
                self.transpose_down.append(tdown)
            input_features = output_features
            output_features = int(np.round(output_features * feat_map_mul_on_downscale))
            output_features = min(output_features, self.max_num_features)

        #############################################
        #                BOTTLENECK                 #
        #############################################
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[-1]
        else:
            first_stride = None

        # output of the last conv must match the features of the skip level
        # unless convolutional upsampling handles the reduction
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[-1].output_channels

        self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs["padding"] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(
            nn.Sequential(
                StackedConvLayers(
                    input_features,
                    output_features,
                    num_conv_per_stage - 1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    first_stride,
                    basic_block=basic_block,
                ),
                StackedConvLayers(
                    output_features,
                    final_num_features,
                    1,
                    self.conv_op,
                    self.conv_kwargs,
                    self.norm_op,
                    self.norm_op_kwargs,
                    self.dropout_op,
                    self.dropout_op_kwargs,
                    self.nonlin,
                    self.nonlin_kwargs,
                    basic_block=basic_block,
                ),
            )
        )

        # disable dropout in the localization pathway unless requested
        if not dropout_in_localization:
            old_dropout_p = self.dropout_op_kwargs["p"]
            self.dropout_op_kwargs["p"] = 0.0

        #############################################
        #                 DECODER                   #
        #############################################
        for npool in range(num_pool):
            # conv_blocks_context[-1] is the bottleneck, so skips start at -2
            nfeatures_from_skip = self.conv_blocks_context[
                -(2 + npool)
            ].output_channels
            n_features_after_tu_and_concat = nfeatures_from_skip * 2

            if npool != num_pool - 1 and not self.convolutional_upsampling:
                final_num_features = self.conv_blocks_context[
                    -(3 + npool)
                ].output_channels
            else:
                final_num_features = nfeatures_from_skip

            # NJ CHANGE 2: unpool only on selected stages, otherwise interpolate
            this_pool_op_kernel_size = pool_op_kernel_sizes[-(npool + 1)]
            if npool in self.unpool_on_layers:
                self.transpose_up.append(
                    unpool_op(this_pool_op_kernel_size, this_pool_op_kernel_size)
                )
            else:
                self.transpose_up.append(
                    Upsample(
                        scale_factor=pool_op_kernel_sizes[-(npool + 1)],
                        mode=upsample_mode,
                    )
                )

            self.conv_kwargs["kernel_size"] = self.conv_kernel_sizes[-(npool + 1)]
            self.conv_kwargs["padding"] = self.conv_pad_sizes[-(npool + 1)]
            self.conv_blocks_localization.append(
                nn.Sequential(
                    StackedConvLayers(
                        n_features_after_tu_and_concat,
                        nfeatures_from_skip,
                        num_conv_per_stage - 1,
                        self.conv_op,
                        self.conv_kwargs,
                        self.norm_op,
                        self.norm_op_kwargs,
                        self.dropout_op,
                        self.dropout_op_kwargs,
                        self.nonlin,
                        self.nonlin_kwargs,
                        basic_block=basic_block,
                    ),
                    StackedConvLayers(
                        nfeatures_from_skip,
                        final_num_features,
                        1,
                        self.conv_op,
                        self.conv_kwargs,
                        self.norm_op,
                        self.norm_op_kwargs,
                        self.dropout_op,
                        self.dropout_op_kwargs,
                        self.nonlin,
                        self.nonlin_kwargs,
                        basic_block=basic_block,
                    ),
                )
            )

        # one 1x1(x1) segmentation head per decoder resolution
        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(
                conv_op(
                    self.conv_blocks_localization[ds][-1].output_channels,
                    num_classes,
                    1,
                    1,
                    0,
                    1,
                    1,
                    seg_output_use_bias,
                )
            )

        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
        for usl in range(num_pool - 1):
            if self.upscale_logits:
                self.upscale_logits_ops.append(
                    Upsample(
                        scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                        mode=upsample_mode,
                    )
                )
            else:
                self.upscale_logits_ops.append(lambda x: x)

        if not dropout_in_localization:
            self.dropout_op_kwargs["p"] = old_dropout_p

        # register all modules properly
        self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.transpose_down = nn.ModuleList(self.transpose_down)
        self.transpose_up = nn.ModuleList(self.transpose_up)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            # lambda x: x is not a Module so we need to distinguish here
            self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops)
        if self.weightInitializer is not None:
            self.apply(self.weightInitializer)

    def forward(self, x):
        """Encoder -> bottleneck -> decoder; unpool or interpolate per stage."""
        skips = []
        indicis = []  # NJ: argmax indices from nn.MaxPool*(..., return_indices=True)
        seg_outputs = []
        for d in range(len(self.conv_blocks_context) - 1):
            x = self.conv_blocks_context[d](x)
            skips.append(x)
            if not self.convolutional_pooling:
                x, index = self.transpose_down[d](x)
                indicis.append(index)  # NJ: save indices for every pooling step

        x = self.conv_blocks_context[-1](x)

        for u in range(len(self.transpose_up)):
            if u in self.unpool_on_layers:
                # max-unpool stage: needs the recorded encoder indices
                x = self.transpose_up[u](x, indicis[-(u + 1)])
            else:
                # interpolation stage
                x = self.transpose_up[u](x)
            x = torch.cat((x, skips[-(u + 1)]), dim=1)
            x = self.conv_blocks_localization[u](x)
            seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))

        if self._deep_supervision and self.do_ds:
            return tuple(
                [seg_outputs[-1]]
                + [
                    i(j)
                    for i, j in zip(
                        list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1]
                    )
                ]
            )
        else:
            return seg_outputs[-1]

    @staticmethod
    def compute_approx_vram_consumption(
        patch_size,
        num_pool_per_axis,
        base_num_features,
        max_num_features,
        num_modalities,
        num_classes,
        pool_op_kernel_sizes,
        deep_supervision=False,
        conv_per_stage=2,
    ):
        """Constant term approximately proportional to VRAM consumption.

        This only applies for num_conv_per_stage and
        convolutional_upsampling=True. Not real VRAM consumption -- see
        :meth:`SegNet.compute_approx_vram_consumption` for parameter details.
        """
        if not isinstance(num_pool_per_axis, np.ndarray):
            num_pool_per_axis = np.array(num_pool_per_axis)

        npool = len(pool_op_kernel_sizes)

        map_size = np.array(patch_size)
        tmp = np.int64(
            (conv_per_stage * 2 + 1)
            * np.prod(map_size, dtype=np.int64)
            * base_num_features
            + num_modalities * np.prod(map_size, dtype=np.int64)
            + num_classes * np.prod(map_size, dtype=np.int64)
        )

        num_feat = base_num_features

        for p in range(npool):
            for pi in range(len(num_pool_per_axis)):
                map_size[pi] /= pool_op_kernel_sizes[p][pi]
            num_feat = min(num_feat * 2, max_num_features)
            # conv_per_stage for encoder + decoder convs and 1 for transposed conv
            num_blocks = (
                (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage
            )
            tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
            if deep_supervision and p < (npool - 2):
                tmp += np.prod(map_size, dtype=np.int64) * num_classes

        return tmp
38.248869
127
0.548444
5,644
50,718
4.599575
0.057938
0.04376
0.032357
0.039946
0.97396
0.971572
0.969453
0.969453
0.969453
0.968413
0
0.01705
0.36283
50,718
1,325
128
38.277736
0.786267
0.137328
0
0.912315
0
0
0.013153
0
0
0
0
0.000755
0
1
0.008867
false
0
0.008867
0
0.06798
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
08452be74610c423f3b1af37568b8140e1fb5c80
30,367
py
Python
guessing number/package/ui.py
mahdigoodarzi123/gussingnumbergui
5658674a3bea7a2432ecf48beb7080b9fc4fce26
[ "MIT" ]
null
null
null
guessing number/package/ui.py
mahdigoodarzi123/gussingnumbergui
5658674a3bea7a2432ecf48beb7080b9fc4fce26
[ "MIT" ]
null
null
null
guessing number/package/ui.py
mahdigoodarzi123/gussingnumbergui
5658674a3bea7a2432ecf48beb7080b9fc4fce26
[ "MIT" ]
null
null
null
from abc import abstractclassmethod from tkinter import * from tkinter import font from tkinter.messagebox import * from .bl import * def login_onclick(form , username_entry , password_entry): username = username_entry.get() password = password_entry.get() res2 = get(username , password) if res2[0]=="ERROR": username_entry.delete(0 , END) password_entry.delete(0 , END) showerror("ERROR" , res2[1]) elif res2[1]: # form.destroy() main_form() elif not res2[1]: username_entry.delete(0,END) password_entry.delete(0,END) showerror("ERROR" , "USERNAME OR PASSWORD ERROR") def register_onclick(form , username_entry , password_entry , confirm_password_entry): username = username_entry.get() password = password_entry.get() confirm_password = confirm_password_entry.get() res = add(username , password , confirm_password) if res[0] == "ERROR": username_entry.delete(0,END) password_entry.delete(0,END) confirm_password_entry.delete(0,END) showerror("ERROR",res[1]) else: showinfo("Success",res[1]) form.destroy() login_form() def load_page(): res = init() if res[0] == "ERROR": showerror("ERROR" ,res[1]) def login_to_register(form): form.destroy() register_form() def register_to_login(form): form.destroy() login_form() def login_form(): login = Tk() login.title("login form") login.geometry("350x350") login.configure(bg="white") body = Frame( master=login, bg="white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) # password label Label( master=body, text="username:", bg="white", fg="black", font=("Tahoma" , 10 , "bold"), anchor=W ).pack(padx=(30,30) , pady=(70,0)) # username entry username_entry = Entry( master=body, bg="white", fg="gray", font=("Tahoma",14,"normal") ) username_entry.pack() # password label Label( master=body, text="password:", bg="white", fg="black", font=("Tahoma" , 10 , "bold"), anchor=W ).pack(padx=(30,30) , pady=(60,0)) # password entry password_entry = Entry( master=body, bg="white", fg="gray", font=("Tahoma",14,"normal") ) 
password_entry.pack(pady=(0,15)) # login button Button( master=body, text="Login!!!", bg="#28a745", fg="white", font= ("Tahoma",10,"bold"), pady= 5, command= lambda: login_onclick(login,username_entry,password_entry) ).pack(side=TOP,fill=X,pady=(0,3),padx=(30,30)) # register button Button( master=body, text="register", bg="red", fg="white", font=("Tahoma",10,"bold"), pady=5, command= lambda : login_to_register(login) ).pack(side=TOP,fill=X,pady=(0,3),padx=(30,30)) login.mainloop() def register_form(): register = Tk() register.title("register form") register.geometry("450x450") register.resizable(width=False , height=False) register.configure(bg="white") body = Frame( master=register, bg="white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) # username region Label ( master=body, text="username:", bg="white", fg="black", font=("Tahoma" , 10 , "bold"), anchor=W ).pack(side=TOP,fill=X,pady=(50,0),padx=(30,30)) #username entry username_entry = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 14 , "normal") ) username_entry.pack(side=TOP,fill=X,pady=(0,10),padx=(30,30)) #password region Label ( master=body, text="password:", bg="white", fg="black", font=("Tahoma" , 10 , "bold"), anchor=W ).pack(side=TOP,fill=X,pady=(20,0),padx=(30,30)) # password entry password_entry = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 14 , "normal") ) password_entry.pack(side=TOP,fill=X,pady=(0,10),padx=(30,30)) # confirm password region Label ( master=body, text="confirm password:", bg="white", fg="black", font=("Tahoma" , 10 , "bold"), anchor=W ).pack(side=TOP,fill=X,pady=(20,0),padx=(30,30)) # confirm password entry confirm_password_entry = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 14 , "normal") ) confirm_password_entry.pack(side=TOP,fill=X,pady=(0,10),padx=(30,30)) #region BUTTON Button( master=body, text="register!!!", bg="green", fg="white", font= ("Tahoma",10,"bold"), pady= 5, command= lambda : register_onclick(register , 
username_entry , password_entry , confirm_password_entry) ).pack(side=TOP,fill=X,pady=(0,3),padx=(30,30)) #back button Button( master=body, text="BACK!!!", bg="red", fg="white", font= ("Tahoma",10,"bold"), pady= 5, command= lambda : register_to_login(register) ).pack(side=TOP,fill=X,pady=(0,3),padx=(30,30)) #endregion register.mainloop() #forth one def four(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("350x250") main.configure(bg="white") #this is a test lev= 4 #this is a test numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1 = Entry( master=body, bg="white", fg="black", justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) e2 = Entry( master=body, bg="white", fg="black", justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) e3 = Entry( master=body, bg="white", fg="black", justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) e4 = Entry( master=body, bg="white", fg="black", justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, None , None , None , None, None, None, None , main) ).pack(side=TOP , pady=50) main.mainloop() ################################################################ #fifth one def five(): main = Tk() main.title("forth digit number") # main.resizable(0,0) main.geometry("415x250") main.configure(bg="white") lev= 5 #this is a test numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1 = Entry( master=body, bg="white", 
fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , None , None , None, None, None, None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() #sixth one def six(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("500x250") main.configure(bg="white") lev = 6 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), 
justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , None , None, None, None, None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() def seven(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("600x250") main.configure(bg="white") lev = 7 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , 
y=60) e7 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e7.place(width=50 , height=50, x=525 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , e7 , None, None, None, None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() def eight(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("700x250") main.configure(bg="white") lev = 8 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , y=60) e7 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e7.place(width=50 , height=50, x=525 , y=60) e8 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), 
justify='center' ) e8.place(width=50 , height=50, x=610 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , e7 , e8, None, None, None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() def ninth(): main = Tk() main.title("forth digit number") # main.resizable(0,0) main.geometry("800x250") main.configure(bg="white") lev = 9 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , y=60) e7 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e7.place(width=50 , height=50, x=525 , y=60) e8 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e8.place(width=50 , height=50, x=610 , y=60) e9 = Entry( 
master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e9.place(width=50 , height=50, x=695 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , e7 , e8 , e9 , None , None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() def tenth(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("850x250") main.configure(bg="white") lev = 10 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , y=60) e7 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e7.place(width=50 , height=50, x=525 , y=60) e8 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) 
e8.place(width=50 , height=50, x=610 , y=60) e9 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e9.place(width=50 , height=50, x=695 , y=60) e10 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e10.place(width=50 , height=50, x=780 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , e7 , e8 , e9 , e10 , None , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() def eleventh(): main = Tk() main.title("forth digit number") main.resizable(0,0) main.geometry("950x250") main.configure(bg="white") lev = 11 numb = number_split(lev) # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry e1= Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e1.place(width=50 , height=50, x=15 , y=60) #second entry e2 =Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e2.place(width=50 , height=50, x=100 , y=60) #third entry e3 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e3.place(width=50 , height=50, x=185 , y=60) #forth entry e4 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e4.place(width=50 , height=50, x=270 , y=60) #fifth entry e5 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e5.place(width=50 , height=50, x=355 , y=60) #sixth entry e6 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e6.place(width=50 , height=50, x=440 , y=60) e7 = Entry( master=body, bg="white", 
fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e7.place(width=50 , height=50, x=525 , y=60) e8 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e8.place(width=50 , height=50, x=610 , y=60) e9 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e9.place(width=50 , height=50, x=695 , y=60) e10 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e10.place(width=50 , height=50, x=780 , y=60) e11 = Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ) e11.place(width=50 , height=50, x=865 , y=60) #check button Button( master=footer, text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20, command= lambda: check(numb, e1 , e2 , e3 , e4, e5 , e6 , e7 , e8 , e9 , e10 , e11 , main) ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() # body body = Frame( master= main, bg = "white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) #footer footer = Frame( master=main, bg="white", width=5 ) footer.pack(fill=BOTH,expand=1) footer.propagate(0) #first entry Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ).place(width=50 , height=50, x=15 , y=60) #second entry Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ).place(width=50 , height=50, x=100 , y=60) #third entry Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ).place(width=50 , height=50, x=185 , y=60) #forth entry Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ).place(width=50 , height=50, x=270 , y=60) #fifth entry Entry( master=body, bg="white", fg="black", font=("Tahoma" , 10 , "bold"), justify='center' ).place(width=50 , height=50, x=355 , y=60) #check button Button( master=footer, 
text="check", bg="green", fg="black", font=("Tahoma" , 10 , "bold"), padx=45, pady=20 ).pack(side=TOP , pady=50) # .place(width=20 , height=20 , x=170 , y=50) main.mainloop() ############################################################## def main_form(): management = Tk() management.title("main page") management.resizable(0,0) management.geometry("514x574") management.configure(bg="white") # body region body = Frame( master=management, bg="white" ) body.pack(fill=BOTH , expand=True) body.propagate(0) # label region for text Label( master=body, text="levels : ", bg="white", fg="black", font= ("Tahoma",10,"bold"), anchor=W ).pack(side=TOP,fill=BOTH) # button 4 Button( master=body , text="4", bg="gray", fg="black", padx=25, pady=20, command= lambda : four() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 5 Button( master=body , text="5", bg="gray", fg="black", padx=25, pady=20, command= lambda : five() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 6 Button( master=body , text="6", bg="gray", fg="black", padx=25, pady=20, command= lambda : six() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 7 Button( master=body , text="7", bg="gray", fg="black", padx=25, pady=20, command= lambda : seven() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 8 Button( master=body , text="8", bg="gray", fg="black", padx=25, pady=20, command= lambda : eight() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 9 Button( master=body , text="9", bg="gray", fg="black", padx=25, pady=20, command= lambda : ninth() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 10 Button( master=body , text="10", bg="gray", fg="black", padx=25, pady=20, command= lambda : tenth() ).pack(side=TOP , fill=BOTH,pady=(0,5)) # button 11 Button( master=body , text="11", bg="gray", fg="black", padx=25, pady=20, command= lambda : eleventh() ).pack(side=TOP , fill=BOTH,pady=(0,5)) management.mainloop() load_page()
19.912787
112
0.470906
3,521
30,367
4.04175
0.047998
0.053123
0.067458
0.089944
0.896423
0.875132
0.861429
0.846813
0.844635
0.823273
0
0.071745
0.368887
30,367
1,525
113
19.912787
0.670806
0.050548
0
0.801131
0
0
0.104925
0
0
0
0
0
0
1
0.01508
false
0.021678
0.004713
0
0.019793
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f2be656b86017d3734014b5017a58b9ce0b4076e
12,888
py
Python
simulate.py
apitsch/drl_mppo
5c76e1f96ec2854c4d446fbad0affaaad3364d51
[ "MIT" ]
2
2019-01-11T05:01:45.000Z
2021-03-03T13:55:54.000Z
simulate.py
apitsch/drl_mppo
5c76e1f96ec2854c4d446fbad0affaaad3364d51
[ "MIT" ]
null
null
null
simulate.py
apitsch/drl_mppo
5c76e1f96ec2854c4d446fbad0affaaad3364d51
[ "MIT" ]
2
2019-05-24T06:38:38.000Z
2020-11-22T00:56:33.000Z
"""Simulation of portfolio strategies in a common environment.

Each public ``portfolio_*`` function simulates one strategy for a number of
episodes and returns the same triple of arrays.  The episode loop is identical
across all strategies, so it lives in the private ``_simulate`` helper; the
strategies differ only in how the per-period action (target risky-asset
weight) is chosen.
"""

import copy
import math

import numpy as np

from helpers import compute_trade, argmax_index, compute_opt_weight


def _simulate(eval_episodes, environment, label, act):
    """Run the shared simulation loop for one portfolio strategy.

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance; deep-copied so the caller's
        environment is left untouched.
    :param label: str, strategy name interpolated into the progress message.
    :param act: callable ``act(env) -> float`` returning the target share of
        wealth in the risky asset for the current period.  The callable is
        also responsible for consuming the per-step "random context" draw
        (where the strategy uses one) so the RNG stream is unchanged.
    :returns: tuple ``(final_u, alloc_to_risk, ret)`` of ndarrays --
        utility of terminal wealth per episode, realized risky-asset share
        per period per episode, and simple gross returns per period
        (flattened over all episodes).
    """
    print("Simulating %s portfolio strategy." % label)
    env = copy.deepcopy(environment)
    final_u = []
    alloc_to_risk = [[] for _ in range(eval_episodes)]
    ret = []
    # Fixed seed so every strategy is evaluated on the same market draws.
    np.random.seed(111)
    for episode in range(eval_episodes):
        env.reset()
        while not env.done:
            action = act(env)
            trade = compute_trade(env.p, action, env.tcost)
            env.trade(trade)
            # Sanity check: the executed trade must realize the requested
            # allocation.  NOTE: assert is stripped under ``python -O``.
            assert math.isclose(env.p[1] / np.sum(env.p), action)
            alloc_to_risk[episode].append(env.p[1] / np.sum(env.p))
            sgr = env.update()
            ret.append(sgr)
        final_u.append(env.get_utility())
    return np.array(final_u), np.array(alloc_to_risk), np.array(ret)


def portfolio_safe(eval_episodes, environment):
    """Simulate a no-risk strategy (risky-asset weight fixed at 0).

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        np.random.rand()  # random context (drawn but unused)
        return 0.
    return _simulate(eval_episodes, environment, "safe", act)


def portfolio_risky(eval_episodes, environment):
    """Simulate a full-risk strategy (risky-asset weight fixed at 1).

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        np.random.rand()  # random context (drawn but unused)
        return 1.
    return _simulate(eval_episodes, environment, "risky", act)


def portfolio_myopic(eval_episodes, environment):
    """Simulate the optimal myopic strategy.

    The per-period weight is ``compute_opt_weight`` clipped to [0, 1]
    (no shorting, no leverage).

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        # optimal allocation to the risky asset for the current period
        optimal_risky_weight = compute_opt_weight(env, env.time)
        # no-shorting / no-leverage constraint
        if optimal_risky_weight > 1:
            optimal_risky_weight = 1.0
        elif optimal_risky_weight < 0:
            optimal_risky_weight = 0.0
        np.random.rand()  # random context (drawn but unused)
        return optimal_risky_weight
    return _simulate(eval_episodes, environment, "myopic", act)


def portfolio_qtab(eval_episodes, environment, agent):
    """Simulate the strategy of a trained tabular Q-learning agent.

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :param agent: AgentQtab instance (preferably trained in the same
        environment as is simulated).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        np.random.rand()  # random context (drawn but unused)
        # greedy action from the learned Q-table
        return agent.action_space[argmax_index(agent.q_tab)]
    return _simulate(eval_episodes, environment, "tabular Q-learning", act)


def portfolio_dqn(eval_episodes, environment, agent):
    """Simulate the strategy of a trained DQN agent.

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :param agent: AgentDQN instance (preferably trained in the same
        environment as is simulated).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        np.random.rand()  # random context (drawn but unused)
        state = env.get_state()
        # greedy action w.r.t. the Q-network's predictions
        q_pred = np.squeeze(agent.qnn.predict(state))
        return agent.action_space[argmax_index(q_pred)]
    return _simulate(eval_episodes, environment, "DQN", act)


def portfolio_reinforce(eval_episodes, environment, agent):
    """Simulate the strategy of a trained REINFORCE agent.

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :param agent: AgentReinforce instance (preferably trained in the same
        environment as is simulated).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        # policy action for the current state (no extra context draw here,
        # matching the original implementation)
        return agent.choose_action(env.get_state())
    return _simulate(eval_episodes, environment, "REINFORCE", act)


def portfolio_ac(eval_episodes, environment, agent):
    """Simulate the strategy of a trained actor-critic agent.

    :param eval_episodes: int, number of episodes to simulate.
    :param environment: Env instance to simulate in (not modified).
    :param agent: AgentAC instance (preferably trained in the same
        environment as is simulated).
    :returns: ``(final_u, alloc_to_risk, ret)`` -- see :func:`_simulate`.
    """
    def act(env):
        # policy action for the current state (no extra context draw here,
        # matching the original implementation)
        return agent.choose_action(env.get_state())
    return _simulate(eval_episodes, environment, "actor-critic", act)
30.468085
79
0.62306
1,625
12,888
4.809846
0.078154
0.032242
0.05911
0.034928
0.917861
0.898925
0.891249
0.891249
0.891249
0.891249
0
0.004713
0.292055
12,888
422
80
30.540284
0.851929
0.431021
0
0.818713
0
0
0.041012
0
0
0
0
0
0.040936
1
0.040936
false
0
0.023392
0
0.105263
0.040936
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
29895ce6aad172ba546898f66790665e99e24916
252
py
Python
src/fsops/fso/__init__.py
nasgoncalves/fsops
01ecf2d5fe1553efb387eb9a67d9ecf34649bddf
[ "Apache-2.0" ]
null
null
null
src/fsops/fso/__init__.py
nasgoncalves/fsops
01ecf2d5fe1553efb387eb9a67d9ecf34649bddf
[ "Apache-2.0" ]
1
2019-10-20T22:50:41.000Z
2019-10-20T22:50:41.000Z
src/fsops/fso/__init__.py
nasgoncalves/fsops
01ecf2d5fe1553efb387eb9a67d9ecf34649bddf
[ "Apache-2.0" ]
null
null
null
from .file_system_object import Object # NOQA from .file_system_object import Type # NOQA from .file_system_object import Time # NOQA from .file_system_object import MetaType # NOQA from .hash import Hash # NOQA from .search import Search # NOQA
36
48
0.785714
38
252
5
0.289474
0.210526
0.294737
0.421053
0.610526
0.473684
0
0
0
0
0
0
0.166667
252
6
49
42
0.904762
0.115079
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
d9b21c8680df5cc3b675a53f65450572b7dbf519
87,381
py
Python
venv/Lib/site-packages/baidubce/services/blb/app_blb_client.py
dlfming/dd_catl_demo
6f6f3b502046f638150222c1eb3d68d2b65da04b
[ "MIT" ]
22
2015-10-26T03:00:11.000Z
2021-09-08T09:30:51.000Z
venv/Lib/site-packages/baidubce/services/blb/app_blb_client.py
dlfming/dd_catl_demo
6f6f3b502046f638150222c1eb3d68d2b65da04b
[ "MIT" ]
8
2018-07-18T02:47:09.000Z
2020-12-10T02:30:37.000Z
venv/Lib/site-packages/baidubce/services/blb/app_blb_client.py
dlfming/dd_catl_demo
6f6f3b502046f638150222c1eb3d68d2b65da04b
[ "MIT" ]
14
2016-01-12T11:57:38.000Z
2021-03-10T03:35:12.000Z
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.

"""
This module provides a client class for APP BLB.
"""

import copy
import json
import logging
import uuid
import sys

from baidubce import bce_base_client
from baidubce.auth import bce_v1_signer
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_methods
from baidubce import utils
from baidubce.utils import required
from baidubce import compat

if sys.version < '3':
    sys.setdefaultencoding('utf-8')

_logger = logging.getLogger(__name__)


class AppBlbClient(bce_base_client.BceBaseClient):
    """
    APP BLB base sdk client
    """
    # API version prefix prepended to every request path.
    version = b'/v1'

    def __init__(self, config=None):
        bce_base_client.BceBaseClient.__init__(self, config)

    def _merge_config(self, config=None):
        """
        Merge a per-request configuration into the client's default one.

        :param config: per-request overriding configuration, or None
        :type config: baidubce.BceClientConfiguration
        :return: the effective configuration for this request
        """
        # Guard clause: no override means the client default applies as-is.
        if config is None:
            return self.config
        merged = copy.copy(self.config)
        merged.merge_non_none_values(config)
        return merged

    def _send_request(self, http_method, path,
                      body=None, headers=None, params=None,
                      config=None, body_parser=None):
        """
        Fill in default parser/headers and dispatch one HTTP request
        through the BCE client stack.
        """
        config = self._merge_config(config)
        if body_parser is None:
            body_parser = handler.parse_json
        if headers is None:
            headers = {b'Accept': b'*/*',
                       b'Content-Type': b'application/json;charset=utf-8'}
        return bce_http_client.send_request(
            config, bce_v1_signer.sign, [handler.parse_error, body_parser],
            http_method, path, body, headers, params)
@required(vpc_id=(bytes, str), subnet_id=(bytes, str)) def create_app_loadbalancer(self, vpc_id, subnet_id, name=None, desc=None, client_token=None, config=None): """ Create a app LoadBalancer with the specified options. :param name: the name of LoadBalancer to create :type name: string :param desc: The description of LoadBalancer :type desc: string :param vpc_id: id of vpc which the LoadBalancer belong to :type vpc_id: string :param subnet_id: id of subnet which the LoadBalancer belong to :type subnet_id: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) body['vpcId'] = compat.convert_to_string(vpc_id) body['subnetId'] = compat.convert_to_string(subnet_id) return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def update_app_loadbalancer(self, blb_id, name=None, desc=None, client_token=None, config=None): """ Modify the special attribute to new value of the LoadBalancer owned by the user. :param name: name of LoadBalancer to describe :type name: string :param blb_id: id of LoadBalancer to describe :type blb_id: string :param desc: The description of LoadBalancer :type desc: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) return self._send_request(http_methods.PUT, path, json.dumps(body), params=params, config=config) def describe_app_loadbalancers(self, address=None, name=None, blb_id=None, bcc_id=None, marker=None, max_keys=None, config=None): """ Return a list of LoadBalancers :param address: Intranet service address in dotted decimal notation :type address: string :param name: name of LoadBalancer to describe :type name: string :param blb_id: id of LoadBalancer to describe :type blb_id: string :param bcc_id: bcc which bind the LoadBalancers :type bcc_id: string :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb') params = {} if address is not None: params[b'address'] = address if name is not None: params[b'name'] = name if blb_id is not None: params[b'blbId'] = blb_id if bcc_id is not None: params[b'bccId'] = bcc_id if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_loadbalancer_detail(self, blb_id, config=None): """ Return detail imformation of specific LoadBalancer :param blb_id: id of LoadBalancer to describe :type blb_id: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) return self._send_request(http_methods.GET, path, config=config) @required(blb_id=(bytes, str)) def delete_app_loadbalancer(self, blb_id, client_token=None, config=None): """ delete the LoadBalancer owned by the user. :param blb_id: id of LoadBalancer to describe :type blb_id: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token return self._send_request(http_methods.DELETE, path, params=params, config=config) """ Listener API """ @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_tcp_listener(self, blb_id, listener_port, scheduler, client_token=None, config=None): """ Create a app tcp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_udp_listener(self, blb_id, listener_port, scheduler, client_token=None, config=None): """ Create a app udp listener rule with the specified options. 
:param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_http_listener(self, blb_id, listener_port, scheduler, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, redirect_port=None, client_token=None, config=None): """ Create a app http listener rule with the specified options. 
:param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is,the request sent by the same client will reach the same backend server :value true or false default:false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite' default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000 default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: False :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout:int :param redirect_port: Forward the request received by this listener to the HTTPS listener, which is specified by the HTTPS listener. :type redirect_port:int :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler)} if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if redirect_port is not None: body['redirectPort'] = redirect_port return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str), cert_ids=list) def create_app_https_listener(self, blb_id, listener_port, scheduler, cert_ids, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ Create a app https listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The certificate to be loaded by the listener. 
:type cert_ids: List<String> :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default: false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: flase :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". 
type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler), 'certIds': cert_ids} if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if ie6_compatible is not None: body['ie6Compatible'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str), cert_ids=list) def create_app_ssl_listener(self, blb_id, listener_port, scheduler, cert_ids, ie6_compatible=None, 
encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ Create a app ssl listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. :type cert_ids: List<String> :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler), 'certIds': cert_ids} if ie6_compatible is not None: body['ie6Compatible'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_tcp_listener(self, blb_id, listener_port, scheduler=None, client_token=None, config=None): """ update a app tcp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id:string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port:int :param scheduler balancing algorithm :value 'RoundRobin'or'LeastConnection'or'Hash' :type scheduler:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_udp_listener(self, blb_id, listener_port, scheduler=None, client_token=None, config=None): """ update a app udp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id:string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port:int :param scheduler balancing algorithm :value 'RoundRobin'or'LeastConnection'or'Hash' :type scheduler:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = { 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_http_listener(self, blb_id, listener_port, scheduler=None, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, redirect_port=None, client_token=None, config=None): """ update a app http listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: Port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: Balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default:false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden,if and only if session persistence is enabled and keep_session_type="rewrite" :type 
keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: flase :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param redirect_port: Forward the request received by this listener to the HTTPS listener, which is specified by the HTTPS listener. :type redirect_port: int :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if redirect_port is not None: body['redirectPort'] = redirect_port return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_https_listener(self, blb_id, listener_port, scheduler=None, keep_session=None, keep_session_type=None, 
keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, cert_ids=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ update a app https listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: Port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: Balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default: false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default: insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden,if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: False :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. 
:type cert_ids:List<String> :param ie6_compatible: Is it compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if cert_ids is not None: body['certIds'] = cert_ids if ie6_compatible is not None: body['compatibleIE'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_ssl_listener(self, blb_id, listener_port, scheduler=None, cert_ids=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ update a app ssl listener rule with the specified options. 
:param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. :type cert_ids: List<String> :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if cert_ids is not None: body['certIds'] = cert_ids if ie6_compatible is not None: body['compatibleIE'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_tcp_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app tcp listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_udp_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app udp listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_http_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app http listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_https_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app https listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_ssl_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app ssl listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. :type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), portList=list) def delete_app_listeners(self, blb_id, portList, client_token=None, config=None): """ Release app listener under the specified LoadBalancer, the listener is specified by listening to the port. 
:param blb_id: id of LoadBalancer :type blb_id:string :param portList: The ports of listeners to be released :type portList:list<int> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'listener') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['portList'] = portList return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, app_policy_vos=list) def create_policys(self, blb_id, listener_port, app_policy_vos, client_token=None, config=None): """ Create policys. :param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param app_policy_vos policy list the listener binds. If the listener type is TCP, there is only one policy and only the full match is supported. https://cloud.baidu.com/doc/BLB/API.html#AppPolicy :type app_policy_vos: list<AppPolicy> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'appPolicyVos': app_policy_vos } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def describe_policys(self, blb_id, listener_port, marker=None, max_keys=None, config=None): """ get policys :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port used by listener :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. :type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} params[b'port'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, policys_list=list) def delete_policys(self, blb_id, listener_port, policys_list, client_token=None, config=None): """ Release the listener under the specified LoadBalancer, the listener is specified by listening to the port. 
:param blb_id: id of LoadBalancer :type blb_id:string :param listener_port The listener port used by listener :type listener_port:int :param policys_list All policy identifiers to be released :type policys_list:list<str> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'port': listener_port, 'policyIdList': policys_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) """ ServerGroup API """ @required(blb_id=(bytes, str)) def create_app_server_group(self, blb_id, name=None, desc=None, backend_server_list=None, client_token=None, config=None): """ create server group for the specified LoadBalancer, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param name: name of server group :type name:string :param desc: description of server group :type desc:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) if backend_server_list is not None: body['backendServerList'] = backend_server_list return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def update_app_server_group(self, blb_id, sg_id, name=None, desc=None, client_token=None, config=None): """ update the information of the app server group of the specified LoadBalancer :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group to be updated :type sg_id:string :param name: name of server group :type name:string :param desc: description of server group :type desc:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['sgId'] = compat.convert_to_string(sg_id) if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_server_group(self, blb_id, name=None, exactly_match=None, marker=None, max_keys=None, config=None): """ Query the imformation of app server group of the specified LoadBalancer :param blb_id: Id of LoadBalancer :type blb_id:string :param name: name of server group :type name:string :param exactly_match: Set whether the name matches globally :type exactly_match:boolean :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys: The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if name is not None: params[b'name'] = name if exactly_match is not None: params[b'exactlyMatch'] = exactly_match if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def delete_app_server_group(self, blb_id, sg_id, client_token=None, config=None): """ delete the app server group of the specified LoadBalancer, :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group to be updated :type sg_id:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} params[b'delete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['sgId'] = compat.convert_to_string(sg_id) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port=int, protocol_type=(bytes, str)) def create_app_server_group_port(self, blb_id, sg_id, port, protocol_type, health_check=None, health_check_port=None, health_check_urlpath=None, health_check_timeout_insecond=None, health_check_interval_insecond=None, health_check_down_retry=None, health_check_up_retry=None, health_check_normal_status=None, client_token=None, config=None): """ create server group for the specified LoadBalancer, support 
batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port: Port number, integer between 1 and 65535 :type port:string :param protocol_type: Protocol type of listening port, "TCP"/"UDP"/"HTTP" :type protocol_type:string :param health_check: Health check protocol :value 'HTTP' or 'TCP',default:'HTTP' :type health_check: string :param health_check_port: Health check port, the default is the same as port :type health_check_port: int :param health_check_urlpath: Health check URI, default '/'. Effective when the health check protocol is "HTTP" :type health_check_urlpath: string :param health_check_timeout_insecond: Health check timeout (unit: second) :value 1-60, default: 3 :type health_check_timeout_insecond: int :param health_check_interval_insecond: Health check interval (unit: second) :value 1-10, default: 3 :type health_check_interval_insecond: int :param health_check_down_retry: The unhealthy down retry, that is, how many consecutive health check failures, shields the backend server. :value 2-5, default: 3 :type health_check_down_retry: int :param health_check_up_retry: Health up retry, that is, how many consecutive health checks are successful, then re-use the back-end server :value:2-5, default: 3 :type health_check_up_retry: int :param health_check_normal_status: The HTTP status code when the health check is normal supports a combination of five types of status codes, such as "http_1xx|http_2xx", Effective when the health check protocol is "HTTP" :value default: http_2xx|http_3xx :type health_check_normal_status: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'port': port, 'type': compat.convert_to_string(protocol_type) } if health_check is not None: body['healthCheck'] = compat.convert_to_string(health_check) if health_check_port is not None: body['healthCheckPort'] = health_check_port if health_check_urlpath is not None: body['healthCheckUrlPath'] = \ compat.convert_to_string(health_check_urlpath) if health_check_timeout_insecond is not None: body['healthCheckTimeoutInSecond'] = health_check_timeout_insecond if health_check_interval_insecond is not None: body['healthCheckIntervalInSecond'] = health_check_interval_insecond if health_check_down_retry is not None: body['healthCheckDownRetry'] = health_check_down_retry if health_check_up_retry is not None: body['healthCheckUpRetry'] = health_check_up_retry if health_check_normal_status is not None: body['healthCheckNormalStatus'] = \ compat.convert_to_string(health_check_normal_status) return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port_id=(bytes, str)) def update_app_server_group_port(self, blb_id, sg_id, port_id, health_check=None, health_check_port=None, health_check_urlpath=None, health_check_timeout_insecond=None, health_check_interval_insecond=None, health_check_down_retry=None, health_check_up_retry=None, health_check_normal_status=None, client_token=None, config=None): """ update server group for the specified LoadBalancer, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port_id: 
The id of the server group port to be updated :type port_id:string :param health_check: Health check protocol :value 'HTTP' or 'TCP',default:'HTTP' :type health_check: string :param health_check_port: Health check port, the default is the same as port :type health_check_port: int :param health_check_urlpath: Health check URI, default '/'. Effective when the health check protocol is "HTTP" :type health_check_urlpath: string :param health_check_timeout_insecond: Health check timeout (unit: second) :value 1-60, default: 3 :type health_check_timeout_insecond: int :param health_check_interval_insecond: Health check interval (unit: second) :value 1-10, default: 3 :type health_check_interval_insecond: int :param health_check_down_retry: The unhealthy down retry, that is, how many consecutive health check failures, shields the backend server. :value 2-5, default: 3 :type health_check_down_retry: int :param health_check_up_retry: Health up retry, that is, how many consecutive health checks are successful, then re-use the back-end server :value:2-5, default: 3 :type health_check_up_retry: int :param health_check_normal_status: The HTTP status code when the health check is normal supports a combination of five types of status codes, such as "http_1xx|http_2xx", Effective when the health check protocol is "HTTP" :value default: http_2xx|http_3xx :type health_check_normal_status: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'portId': compat.convert_to_string(port_id) } if health_check is not None: body['healthCheck'] = compat.convert_to_string(health_check) if health_check_port is not None: body['healthCheckPort'] = health_check_port if health_check_urlpath is not None: body['healthCheckUrlPath'] = \ compat.convert_to_string(health_check_urlpath) if health_check_timeout_insecond is not None: body['healthCheckTimeoutInSecond'] = health_check_timeout_insecond if health_check_interval_insecond is not None: body['healthCheckIntervalInSecond'] = health_check_interval_insecond if health_check_down_retry is not None: body['healthCheckDownRetry'] = health_check_down_retry if health_check_up_retry is not None: body['healthCheckUpRetry'] = health_check_up_retry if health_check_normal_status is not None: body['healthCheckNormalStatus'] = \ compat.convert_to_string(health_check_normal_status) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port_list=list) def delete_app_server_group_port(self, blb_id, sg_id, port_list, client_token=None, config=None): """ delete server group of the specified LoadBalancer, :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port_list: The ports of listeners to be released :type port_list:list<string> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'portIdList': port_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def create_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ Add backend server for the specified LoadBalancer and server group, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerList': backend_server_list } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def update_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ update backend server for the specified LoadBalancer and server group, support batch update :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerList': backend_server_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_app_blb_rs(self, blb_id, sg_id, marker=None, max_keys=None, config=None): """ Query the list of backend servers under the specified LoadBalancer and server group :param blb_id: Id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys: The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} params[b'sgId'] = compat.convert_to_string(sg_id) if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def delete_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ delete backend server for the specified LoadBalancer and server group, support batch delete :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be deleted :type backend_server_list:List<string> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerIdList': backend_server_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_rs_mount(self, blb_id, sg_id, config=None): """ describe servers of specific server group :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrsmount') params = { 'sgId': compat.convert_to_string(sg_id) } return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_rs_unmount(self, blb_id, sg_id, config=None): """ describe servers of specific server group :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrsunmount') params = { 'sgId': compat.convert_to_string(sg_id) } return self._send_request(http_methods.GET, path, params=params, config=config) def generate_client_token_by_uuid(): """ The default method to generate the random string for client_token if the optional parameter client_token is not specified by the user. 
:return: :rtype string """ return str(uuid.uuid4()) generate_client_token = generate_client_token_by_uuid
36.48476
85
0.580481
9,775
87,381
5.021074
0.045422
0.018337
0.019437
0.020395
0.933477
0.928445
0.919256
0.913164
0.903405
0.896457
0
0.005411
0.354986
87,381
2,394
86
36.5
0.865406
0.424989
0
0.831409
0
0
0.070373
0.006486
0
0
0
0
0
1
0.047344
false
0
0.015012
0
0.112009
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d9b57c49485840cca689cb6060c1e7555950bf51
104,290
bzl
Python
rust/known_shas.bzl
meteorcloudy/rules_rust
215a8decfb06525a3f13b23fac5b3124eedabd27
[ "Apache-2.0" ]
null
null
null
rust/known_shas.bzl
meteorcloudy/rules_rust
215a8decfb06525a3f13b23fac5b3124eedabd27
[ "Apache-2.0" ]
null
null
null
rust/known_shas.bzl
meteorcloudy/rules_rust
215a8decfb06525a3f13b23fac5b3124eedabd27
[ "Apache-2.0" ]
1
2021-06-21T20:35:33.000Z
2021-06-21T20:35:33.000Z
"""A module containing a mapping of Rust tools to checksums This is a generated file -- see //util:fetch_shas """ FILE_KEY_TO_SHA = { "2018-10-30/llvm-tools-beta-aarch64-unknown-linux-gnu": "9417a07c501e173fe3848c815b8536cf70c6518c8040d45b19260ca3ab720760", "2018-10-30/llvm-tools-beta-x86_64-apple-darwin": "b86d22bc723936f23186acaa94cd0738ff1c7b703d67712be62d99845a6ccc80", "2018-10-30/llvm-tools-beta-x86_64-pc-windows-msvc": "cf74e15df51033370d4225fd9141f4cfc5c37145070d2296915fbecab9275b03", "2018-10-30/llvm-tools-beta-x86_64-unknown-freebsd": "38ba85ec56c374b606a586221185ac351d04871892055f8936f28958e5e7a5cb", "2018-10-30/llvm-tools-beta-x86_64-unknown-linux-gnu": "8d96f1475fc27f21d80bda80496783aa3181a4b33a138b7a89edcef2ddb6cf58", "2018-10-30/rust-beta-aarch64-unknown-linux-gnu": "e9bc9d4a89299595ef42cde7303b50a7921e20a5ee4d131c4e1770a391611303", "2018-10-30/rust-beta-x86_64-apple-darwin": "51acc0077d6abae5beed0e7b99c39ae9b8a0ff0f66daec227caef653046144e6", "2018-10-30/rust-beta-x86_64-pc-windows-msvc": "07f3e42ba299b3b5341b410c4317161eba4b40bbc9fc449a9c187193fe49250c", "2018-10-30/rust-beta-x86_64-unknown-freebsd": "49f1efe5bf10319446a298096dea73e478a466daca20a5b93d5263925e4ba9be", "2018-10-30/rust-beta-x86_64-unknown-linux-gnu": "07963c2d6d56d077856f17e786414cf965832e7942f4ec72dec2eb51452e74b7", "2018-10-30/rust-std-beta-aarch64-unknown-linux-gnu": "bf0895ccf65a86c1f51dbbebd0980bd07dcdc3919407fc84f5838cbbcb29e309", "2018-10-30/rust-std-beta-wasm32-unknown-unknown": "5453e05993aaa90c8ac361086dd888a29a48f1b90b2c2184202c4b44b2e5569d", "2018-10-30/rust-std-beta-x86_64-apple-darwin": "96fc1daea8868e176a14706333590b13a41193bd8ed5a711f23a15eceb5c6ce0", "2018-10-30/rust-std-beta-x86_64-pc-windows-msvc": "6af758524d77288cb5c548d26467680d57aeb778063787662b1e00ef3887866a", "2018-10-30/rust-std-beta-x86_64-unknown-freebsd": "5aef62464a5580ab6b38c9e54203db12f90ad42de538b7a5eefc7778b55b6497", "2018-10-30/rust-std-beta-x86_64-unknown-linux-gnu": 
"34996a688d6a4c3587f873b0a8c86fe1d2fee2a269b6e669b1cb8c6908fb77b8", "2018-10-30/rustc-beta-aarch64-unknown-linux-gnu": "e6bb89261baa494ef98239bde9821b66671de5cd78352a9c100abce3a18ca250", "2018-10-30/rustc-beta-x86_64-apple-darwin": "22153f359b8b98341aa0349233112fff2b9f092988f9d678626207ba29666b5b", "2018-10-30/rustc-beta-x86_64-pc-windows-msvc": "9cd8225f1307aab95b439dbecd70aaa35e03c913ba0897dc7fe3a04755fc15b1", "2018-10-30/rustc-beta-x86_64-unknown-freebsd": "e207562cd5e3a17497e029bcb1cab56d9fa474d788906f13e389a3cb804ea4d6", "2018-10-30/rustc-beta-x86_64-unknown-linux-gnu": "63b69b000cda551f2499ffba6e3f1700acb2b22a47ca9ec9edcc3e578ed086e6", "2018-10-30/rustfmt-beta-aarch64-unknown-linux-gnu": "bd97e8012277d49beececdf4125610c2d0112cd22f8add53f86fd7c6dac5dc0d", "2018-10-30/rustfmt-beta-x86_64-apple-darwin": "2c213f43902104ebefe9eee6fa49aa36e16af972cb7aa1c63d772e5d05f74b59", "2018-10-30/rustfmt-beta-x86_64-pc-windows-msvc": "316331e7aed82251ab3b701845e8b1e707947670498ce4fc364ce1d6813e1340", "2018-10-30/rustfmt-beta-x86_64-unknown-freebsd": "17826e710d5912d23cd981b46b598733cf8833c20dc7d03d150c5e53ad0a38d3", "2018-10-30/rustfmt-beta-x86_64-unknown-linux-gnu": "4af3f9faacd78deef2e8a06e2971ebc7540de07525ac805fcf334818a6ee0e97", "2018-11-01/llvm-tools-beta-aarch64-unknown-linux-gnu": "7b3f5bb7f45052310efe275ed665b54b55e19b9020b04cec9240318c13c62c0d", "2018-11-01/llvm-tools-beta-x86_64-apple-darwin": "b45ae0fb0b49385b555772a3faa9a5a85aa10f4bd24ba40c55048076d8cda314", "2018-11-01/llvm-tools-beta-x86_64-pc-windows-msvc": "462668f55b85fa1224df30449eb67e8fe0d208caeea39d9339b44ac30867f452", "2018-11-01/llvm-tools-beta-x86_64-unknown-freebsd": "acd86ceedad9f10b2cb3443df032d0b5a9b4eb6c4b30364bd3cb014449607577", "2018-11-01/llvm-tools-beta-x86_64-unknown-linux-gnu": "4b4363c2d03d319e84a7f09fba5b58d188d9fdd62c9486cc4b66e9e63030ab27", "2018-11-01/rust-beta-aarch64-unknown-linux-gnu": "f802784788c2e751d59d035363fb6be2b0450d650ec523115a51fea05fc8589b", 
"2018-11-01/rust-beta-x86_64-apple-darwin": "0be8d634d17b2f92c86515a38e36f66a9f3d72bad226db58ff8cd09924092f53", "2018-11-01/rust-beta-x86_64-pc-windows-msvc": "d86d633b67e6c0fba28963f5c991738575fee56c755570e505f702eed54150e9", "2018-11-01/rust-beta-x86_64-unknown-freebsd": "869f1d01077a318ff0ac664a498354b079c2fc16cbec294161a56cabe6f3e194", "2018-11-01/rust-beta-x86_64-unknown-linux-gnu": "7da7bd24c2f2dafa9d4baa6e7eba1547f62681fbd1dd89910d386b2260e65ca6", "2018-11-01/rust-std-beta-aarch64-unknown-linux-gnu": "137d39872981d343829a8f3eecf0f33fe2f0d81ed18865004b52359d309a6b95", "2018-11-01/rust-std-beta-wasm32-unknown-unknown": "4f7fa7f3adafc2ec5de80cddfd3fc3072da43442cd15be9169b261f76a0a684b", "2018-11-01/rust-std-beta-x86_64-apple-darwin": "bee742244d72ea7289d5d2dea519102994dbee97a5c296b2a8c6853e5450a7ab", "2018-11-01/rust-std-beta-x86_64-pc-windows-msvc": "455ecff7f11499cd3822b82ecf0ab8ab34d866c4b5e17b0de84af815a782f226", "2018-11-01/rust-std-beta-x86_64-unknown-freebsd": "eed13a5c36c0731b01b8926f26be5b054c341a0487628fca688e8e99f33b200b", "2018-11-01/rust-std-beta-x86_64-unknown-linux-gnu": "f38a224bccfc89bd0d598764363271985d0b2696123ea10de6399c4cc7dd8adb", "2018-11-01/rustc-beta-aarch64-unknown-linux-gnu": "acf359a4cecfc827f5ca4255c0492d46223d09d535444f0b303678918944c87c", "2018-11-01/rustc-beta-x86_64-apple-darwin": "64b5a5fc8b3dc348395137df2c422adbf483168c58af5cd9acc8522dd9b4392b", "2018-11-01/rustc-beta-x86_64-pc-windows-msvc": "71cbfd2793f6b55653f5ef4bdf0350dfe6d9b0952d518a3a355044ec7caa03c2", "2018-11-01/rustc-beta-x86_64-unknown-freebsd": "51a5370f1776229bede506e5ab05da7cfeb5bd21a5374561acb4b7138c75d508", "2018-11-01/rustc-beta-x86_64-unknown-linux-gnu": "01609bedca249906ba8e07fb681daffe094cdbf41b91e2221195474271d8e6d7", "2018-11-01/rustfmt-beta-aarch64-unknown-linux-gnu": "0969e37628a3d5e56cfc0636db4d1aac7d0b01ab9df6bdeb3453adc1e2ae786e", "2018-11-01/rustfmt-beta-x86_64-apple-darwin": "bbb45d6beddf8da270ab1ec6ea3b9dc2ffc03f5ba1ef3e99e87d004f652f2581", 
"2018-11-01/rustfmt-beta-x86_64-pc-windows-msvc": "2d34424353e248173828898107e331acfbaa29a3686bb8121fdc72fbab7a37e8", "2018-11-01/rustfmt-beta-x86_64-unknown-freebsd": "101a0903c5421df363420d73ebbdb52b3cb8b2bb5ecee9e383b271aa22446f95", "2018-11-01/rustfmt-beta-x86_64-unknown-linux-gnu": "12d49bf16a8e5964163d12a58e8caa5bf81fedd106dc37bbd757aa4116f0d7b3", "2018-11-02/llvm-tools-beta-aarch64-unknown-linux-gnu": "f8021a8a0d302eb4774d31789932488e0897ec72b0d8c16cb746a2d72b749238", "2018-11-02/llvm-tools-beta-x86_64-apple-darwin": "88693cabad55f568ca0c21276e76eb237215abdc9771f224ab807f8edd3aad08", "2018-11-02/llvm-tools-beta-x86_64-pc-windows-msvc": "4b2f2c707bbd3506c55d48d1ff3cadd290445b227c868d596a535faa798f06cf", "2018-11-02/llvm-tools-beta-x86_64-unknown-freebsd": "c8c48f8805b794ac63cbd60bb28d77c575d754c6c97af006bcf6b466adb5ecab", "2018-11-02/llvm-tools-beta-x86_64-unknown-linux-gnu": "92472b453b0a6dbc38cef53cba6d38b5dbc79e3637743b77239543b33084d121", "2018-11-02/rust-beta-aarch64-unknown-linux-gnu": "6e28b053d8ebcef7a19875089db13931ce89bb045d06b4bf834334df3fa43962", "2018-11-02/rust-beta-x86_64-apple-darwin": "78e9fd57d2070cbe2a073758839d54b5535e14918c11260fb244bade3c1971a3", "2018-11-02/rust-beta-x86_64-pc-windows-msvc": "68c0fcbd1f9887eff41c42b703fb2c34b99b2490f4e366efeee4a6bf0c1044c2", "2018-11-02/rust-beta-x86_64-unknown-freebsd": "8313e8655fca87519c469c5699cf803fb0c1bd159ee335aac199ae76753f359d", "2018-11-02/rust-beta-x86_64-unknown-linux-gnu": "76b2d14dc01a922b448019df7a24221f91c7eaa3e2034fcbd6189d5bac7836e4", "2018-11-02/rust-std-beta-aarch64-unknown-linux-gnu": "0d57f4837af1443208abce604d74dcf6880d0cdf4e74eaf2368016f064dbc7ab", "2018-11-02/rust-std-beta-wasm32-unknown-unknown": "25c7e8dbf27dd19d3d4f91062d9cb9bc6ad9aed8afacbd47dab92eba3a3d2533", "2018-11-02/rust-std-beta-x86_64-apple-darwin": "fdc26de5db0e66e0f516069690a0c86ca0e4b8b75973a33dee67ce306e4c9115", "2018-11-02/rust-std-beta-x86_64-pc-windows-msvc": 
"8c7dfe0692e2c0b9de130b64e8ab09946eff2eb419a22bb7a15cd9522995f420", "2018-11-02/rust-std-beta-x86_64-unknown-freebsd": "4661fdd15b5cee0fafd4c9cb085d0614abab0b2b1d62a55540b3d4d2634c4ba7", "2018-11-02/rust-std-beta-x86_64-unknown-linux-gnu": "3e2f68697620e501a9439bb7923f5676c82f7a4b4aaf822a141188c92619fe13", "2018-11-02/rustc-beta-aarch64-unknown-linux-gnu": "d703f1cdecd77aba85024db5e94f13e50c74e66af21091107c7cd67a3179da15", "2018-11-02/rustc-beta-x86_64-apple-darwin": "199a0776ad4f1406b8b6f477d12c58858816f07246d52842384e1084d8c9000e", "2018-11-02/rustc-beta-x86_64-pc-windows-msvc": "a4fd3838f4459a151e83d540784953cd80c5a1a68fe3bf965399c1f07f6476bb", "2018-11-02/rustc-beta-x86_64-unknown-freebsd": "f931cb44b892dc3899c9379238d8f51d35c9503db9e93ce5700f3121712c9b62", "2018-11-02/rustc-beta-x86_64-unknown-linux-gnu": "7ebd46c431b8d9e8d22aa141122eaf301d5facdc449e04872019372598b04b19", "2018-11-02/rustfmt-beta-aarch64-unknown-linux-gnu": "c13f8024e37f7b2b6d97586569bdefca53e99514e4b76b200b71932f4a7ce298", "2018-11-02/rustfmt-beta-x86_64-apple-darwin": "0164c2c57b3ee975c571103dbf7074e24c997cc43df23920660f12de688e8c23", "2018-11-02/rustfmt-beta-x86_64-pc-windows-msvc": "c33b4fe885e21f51b08dfa29d04179d0f138e87122f4f4a3898fa002567f2259", "2018-11-02/rustfmt-beta-x86_64-unknown-freebsd": "ad280d28d4e7054063942cd5a60cc35538af5096f29e159ba13419b82be5560c", "2018-11-02/rustfmt-beta-x86_64-unknown-linux-gnu": "60ec376659a4ad5b129307ffef4dfe9b717fcee6f5d929b1e3e423e599258be2", "2018-11-07/llvm-tools-nightly-aarch64-unknown-linux-gnu": "c2dd30b73e5e0495d7d4c05cf98f50197c00f542cef7f3ef37e095d0d8686991", "2018-11-07/llvm-tools-nightly-x86_64-apple-darwin": "2cd61d0f54d3753777b019d09a2e88cc9a2fb027e947a14eb1f509e014ff19e9", "2018-11-07/llvm-tools-nightly-x86_64-pc-windows-msvc": "14c6387239ee0bd8d9d9ca0c21c808a542de4133e8477fc2f1fc958eaf6c4428", "2018-11-07/llvm-tools-nightly-x86_64-unknown-freebsd": "52a726f57b80cc7d29c15b93ad35e3a7ab62fcfbd7d89f91021a783be6ffbd18", 
"2018-11-07/llvm-tools-nightly-x86_64-unknown-linux-gnu": "ddd007ee68b7468a12d240d0f08939efa262e8d96ad7f903cbef62b461a61417", "2018-11-07/rust-nightly-aarch64-unknown-linux-gnu": "f4725077e948b6eda7d4cd482a8985037a2f8ddaecc964908a490deb9ac46e21", "2018-11-07/rust-nightly-x86_64-apple-darwin": "b9aaefca51aa2d7e89f5d790e865d00ec4142c79cebee43ba0d575f9f52ee65b", "2018-11-07/rust-nightly-x86_64-pc-windows-msvc": "5826f958e4826b0bd0069918185afc6db0802e6d3fe72a9be075f3408b707521", "2018-11-07/rust-nightly-x86_64-unknown-freebsd": "38860320b7e97193493e45d362f34d311d7aa8fface77c93fadfa15e8361df3d", "2018-11-07/rust-nightly-x86_64-unknown-linux-gnu": "c1d7542e90f76d074a7ea925b5ce40ec602c9e3e04822939623a98c4d020ea2e", "2018-11-07/rust-std-nightly-aarch64-unknown-linux-gnu": "e7a4629ab15609fda17a9e16cb0f7538d4077f572ece200891726344e314295b", "2018-11-07/rust-std-nightly-wasm32-unknown-unknown": "9d1bcbf50fdcd9912fd98901ec40c7fd5d73ef2262a70322b4ca52381363c34c", "2018-11-07/rust-std-nightly-x86_64-apple-darwin": "0c988a60e72d545b19b5cef616ddd3411a278226abccceca622581871f2b5cce", "2018-11-07/rust-std-nightly-x86_64-pc-windows-msvc": "0d04200b3bc5ad5f939d98f7af083c0576aae11876830492f9964de23dd33acd", "2018-11-07/rust-std-nightly-x86_64-unknown-freebsd": "ca648e7eb243cec32dc5a1b4e2fe6d67c2a00be56326e4d7aec9f2bbeb4dc138", "2018-11-07/rust-std-nightly-x86_64-unknown-linux-gnu": "bd8daba5c2d36e261da6f0ea8b5893e7fe94252eca7478d581c036fc1acb7c36", "2018-11-07/rustc-nightly-aarch64-unknown-linux-gnu": "11caf45fef229d85efb36cdbcf955d95fae648c27ca4ffd153bad316eb58793a", "2018-11-07/rustc-nightly-x86_64-apple-darwin": "cddecdb0d595cb8b944bf70b2284f557743f5637536f2181ad0036806cf56217", "2018-11-07/rustc-nightly-x86_64-pc-windows-msvc": "479f58f34616b83c003fa29e68ee84c91ee5521038f255a7cd3b597a2f5082d0", "2018-11-07/rustc-nightly-x86_64-unknown-freebsd": "47f81ec8c4ebbcd4e948033b5db72c1e9bec6f284fdaa5bdf59bcc92b075333f", "2018-11-07/rustc-nightly-x86_64-unknown-linux-gnu": 
"7f1aa11f8e503e6e9a03b6cd05ab12b46837bb7597167c72112abaf1481e46cd", "2018-11-07/rustfmt-nightly-aarch64-unknown-linux-gnu": "9b64705de20633c73a39d47deacbacdce11181dc5e06fa632ef08b8a3a7136e0", "2018-11-07/rustfmt-nightly-x86_64-apple-darwin": "bb86c58a5a12922ddd2d19a3cffe9cd8d87785e57f72cdc998e94926a68345a2", "2018-11-07/rustfmt-nightly-x86_64-pc-windows-msvc": "4612d2961bc5d0b24863af21fd0764b8ec8d2a843f1a39b97b418bc283b8fa2b", "2018-11-07/rustfmt-nightly-x86_64-unknown-freebsd": "b0677e91bb7c0645844f988e7f7b625768f0a72947536a27fce7f5a9a850c5e0", "2018-11-07/rustfmt-nightly-x86_64-unknown-linux-gnu": "e1dbad0cb0afccdf5e05f97129f9d34bc62ce6475dd8f09fad2d31a8129acf64", "2018-11-08/llvm-tools-nightly-aarch64-unknown-linux-gnu": "fe2283cabacf6b8dbcd8d11eddb11a2badb750091ec5b38926ad48d3577da601", "2018-11-08/llvm-tools-nightly-x86_64-apple-darwin": "b173d662715ec4edacdb8e06570cd471a6f63d05b49c6867f95ec366f8a2e0db", "2018-11-08/llvm-tools-nightly-x86_64-pc-windows-msvc": "558cd81ceb5e08766ade45cffc98a8d1a179d34f671f9b9518627e8358d65984", "2018-11-08/llvm-tools-nightly-x86_64-unknown-freebsd": "7d77f85900b0aa276df746bc023b1211c4221e6183ef62663565b65b8fadd9f8", "2018-11-08/llvm-tools-nightly-x86_64-unknown-linux-gnu": "c634eb65a3839b176563823d576b1f4705cfcb3e91b237f2c4f852ff5ba08d2a", "2018-11-08/rust-nightly-aarch64-unknown-linux-gnu": "20ef5c5f59171df0335846c6c3315c5e3c495775e3c5b1060481d70421153412", "2018-11-08/rust-nightly-x86_64-apple-darwin": "921f19787a155e5240e21fb2bc630e5907b964652b0f7553b64acf819a0a2d43", "2018-11-08/rust-nightly-x86_64-pc-windows-msvc": "46d76bb12cedd53927cb35fb688540010b5152568467508fa92b0745f0e39463", "2018-11-08/rust-nightly-x86_64-unknown-freebsd": "32c28d8e915086406a3493d59d0e3b4c4751f77a3b34d257a3341aa4e5f8ad4f", "2018-11-08/rust-nightly-x86_64-unknown-linux-gnu": "5f33f1c01720e471b8293d304a01f354363418dc7ceebf206529a34f932c3a82", "2018-11-08/rust-std-nightly-aarch64-unknown-linux-gnu": 
"e83ecf484a848053a8679c8164340f90bd6c5823d9340b4fc5318c6265e544f1", "2018-11-08/rust-std-nightly-wasm32-unknown-unknown": "0e12ecd9a2bbff67b8d82c15200acdd32d1f91fc1761d0b72fbbb5d32ae629f9", "2018-11-08/rust-std-nightly-x86_64-apple-darwin": "9d5e89e71f888247093b5615079da538a56c2758eac270173a4f85a57ef92967", "2018-11-08/rust-std-nightly-x86_64-pc-windows-msvc": "e891a3ee103e65e8e337b3c9c9d1e410c4be97b1318f820b591565b5ae6340ff", "2018-11-08/rust-std-nightly-x86_64-unknown-freebsd": "64aabfec15a2b773c27892e58514161cb05ab370e3291beb1cafc7d270772389", "2018-11-08/rust-std-nightly-x86_64-unknown-linux-gnu": "efb8f6f6aa2c5a3f1c069e05b74fde6a85985837054faf3bc565d839902efedc", "2018-11-08/rustc-nightly-aarch64-unknown-linux-gnu": "494173aa705efeef4df2d88278608bd71b477183d85a670a577051c76c5ee99c", "2018-11-08/rustc-nightly-x86_64-apple-darwin": "316e7727a136a82a20832a69b18f74add335e9b659fa7e0d8c7d12c0d11224b7", "2018-11-08/rustc-nightly-x86_64-pc-windows-msvc": "489cb54446374eccc78eca18aa86b4159d47fdfa7bab0ea9a20cb68fa4d80071", "2018-11-08/rustc-nightly-x86_64-unknown-freebsd": "874b7055e0cb609ce34d38456bda888865c63fcbc7abac5aad147f2a21a7d147", "2018-11-08/rustc-nightly-x86_64-unknown-linux-gnu": "e50e43d71573e069503aa6157d1736d390345006965fa889842835ce80ae36e2", "2018-11-08/rustfmt-nightly-aarch64-unknown-linux-gnu": "4f5f4077d0e1c888fa3366dad638b5e8c7aa032deab83334a170d8e4275d8e47", "2018-11-08/rustfmt-nightly-x86_64-apple-darwin": "b6a0f812726134aea52e2b6ad708c0fb1052f80f1515a66cddeeef07052a67cb", "2018-11-08/rustfmt-nightly-x86_64-pc-windows-msvc": "eb444b276ae5f6ed2c1e6dce994e17ebb94b130747a05c402e0c96b2623a554d", "2018-11-08/rustfmt-nightly-x86_64-unknown-freebsd": "8b31f7677eb0e7bf6ab145a6347e0ff00e57ec3642db3269763d97020cad2ebb", "2018-11-08/rustfmt-nightly-x86_64-unknown-linux-gnu": "bf4c5913c199a5cfeea53432c880a02ba1ec6b38eaa59f012a909a131cf11cf6", "2018-11-09/llvm-tools-nightly-aarch64-unknown-linux-gnu": 
"83188eccc2b7067dcfc960492f91d23ad36ea6460005ac6b91c98d20694e60a6", "2018-11-09/llvm-tools-nightly-x86_64-apple-darwin": "03e2e4ba7ffabe88b118f1207b820cc6c7ab0d79d478c1687ec5bb1c903b4045", "2018-11-09/llvm-tools-nightly-x86_64-pc-windows-msvc": "63a4f2c85f3ef51efb0075944ad0249337cdd7c9593036995f79699393a458d7", "2018-11-09/llvm-tools-nightly-x86_64-unknown-freebsd": "f9ddf5f1e02b800aaa0add32365d62e5dcf590cc130af5b209cdf8520f9262a1", "2018-11-09/llvm-tools-nightly-x86_64-unknown-linux-gnu": "8b3e2e2bf77224e181a9b1987bd2ae940a0462f8b0af84b59de484f8fe96ffb8", "2018-11-09/rust-nightly-aarch64-unknown-linux-gnu": "4d0f22349061a40a834ae6a40640c0f4e8a19f068a215af0fb0b9a7250942d3f", "2018-11-09/rust-nightly-x86_64-apple-darwin": "934b83bbfcbca605875103293cf691a56429661e929e1c29fec2d3c5c1d65143", "2018-11-09/rust-nightly-x86_64-pc-windows-msvc": "6659d7b9001a3613ca6b2bb64e5f6bd67cae51bf02e81b8b96dfe2299a180a21", "2018-11-09/rust-nightly-x86_64-unknown-freebsd": "b50b1cf51e8bd138d55dc77f681904e1b431e7c956951ec603a3d94ff81a0783", "2018-11-09/rust-nightly-x86_64-unknown-linux-gnu": "163e0666f2f7179caa9c5baa8b0280c618dc163007a73f5da0a0c917bd2b8902", "2018-11-09/rust-std-nightly-aarch64-unknown-linux-gnu": "cb12c26ec032ede34f925ea7c57118c8694dee439f0e258f8655b83e08512a43", "2018-11-09/rust-std-nightly-wasm32-unknown-unknown": "a0084c768151b5cb7554085b77fdbbc014a1ba246335623a36b58e7f6bb95fb0", "2018-11-09/rust-std-nightly-x86_64-apple-darwin": "dbc9ffa483484380e41b6514465523f6ef106be5708374b714458d14f76149c4", "2018-11-09/rust-std-nightly-x86_64-pc-windows-msvc": "40e4194f3abd9c1eb97c3783009571f96d83e80018662c4ff6fd60e992b50ee4", "2018-11-09/rust-std-nightly-x86_64-unknown-freebsd": "ab8a32d8efb0ab4686526c6cf1380161e87a89015464f5d5f5438c99723675c7", "2018-11-09/rust-std-nightly-x86_64-unknown-linux-gnu": "1418ba09f97c6ba91e2df5ba0b11cf1c53498710bc6a147fe8f4be455a96c4d8", "2018-11-09/rustc-nightly-aarch64-unknown-linux-gnu": 
"167fec713804d8af1fa4f543e79ca5cee259f1b966b8e04c99efba75901f4c8e", "2018-11-09/rustc-nightly-x86_64-apple-darwin": "55ca5ad85b0afd61a419e374f8e6320b4f4fe30f8092005cdec9e63103812ea7", "2018-11-09/rustc-nightly-x86_64-pc-windows-msvc": "dd19c5a4b209a9f46dd2f99eb7ec0898bd00accf1c6e8a97222c580bcf62e32a", "2018-11-09/rustc-nightly-x86_64-unknown-freebsd": "bd6bb0228aeab01f425cb2ad55b2e0409b43e79450c2830183a6878cc2d2bdc4", "2018-11-09/rustc-nightly-x86_64-unknown-linux-gnu": "2c475f886123353c9388322da6e13a67b6ae902d8c249f8e95fde67429f7bf37", "2018-11-09/rustfmt-nightly-aarch64-unknown-linux-gnu": "159353d0fc3b7d6aea127df348d7e824da79f995bf286df0bf03ed0615b7e875", "2018-11-09/rustfmt-nightly-x86_64-apple-darwin": "940a39cc86d1cbb02535065d40993cc52acb223487c9efd4ce396950b6a72ed6", "2018-11-09/rustfmt-nightly-x86_64-pc-windows-msvc": "896305dd7fb4975ccdf54c569faf05ce0ff9a13fb0b226904fb6594ed5e5c03a", "2018-11-09/rustfmt-nightly-x86_64-unknown-freebsd": "7c313ee99e0bd3ac8ec13b576b07e6e64b0eed22505ddef1710bf2c7b1236378", "2018-11-09/rustfmt-nightly-x86_64-unknown-linux-gnu": "4d1dbb88662353ea4bc353ec4d73600d72af0fc51f54dc3f0b8ee0b0aef05a15", "2020-02-16/llvm-tools-nightly-aarch64-unknown-linux-gnu": "f0de4de8e364ee8e0aefc07500caca3917d79ceb4fd52a1602b5985b4c40ec71", "2020-02-16/llvm-tools-nightly-x86_64-apple-darwin": "b9ec36e5c51f2dc1051e4b5831a49c096836f95c8bb87c19d6fa12bcacaaa914", "2020-02-16/llvm-tools-nightly-x86_64-pc-windows-msvc": "4a2b966614e6bb7a1fc054ce42707c8a2b082b2f28c76c8954a19a65ea35476c", "2020-02-16/llvm-tools-nightly-x86_64-unknown-freebsd": "235fa158239a4b498846eebff92639a87c708056e1a91215377f0d485e354c08", "2020-02-16/llvm-tools-nightly-x86_64-unknown-linux-gnu": "f066cf2b315d0b6edc95d1b5b1b5b7a2275928045f4b2e4329144ca9cee85b6b", "2020-02-16/rust-nightly-aarch64-unknown-linux-gnu": "9edd6c4c0d1b8626c905d91d36330fd9c2671d33f82d5bcd4413bb8696fb628f", "2020-02-16/rust-nightly-x86_64-apple-darwin": 
"608d8747aa928b128b4da9565327fe791ebc787b96e80f09ef84676f3a0a3efc", "2020-02-16/rust-nightly-x86_64-pc-windows-msvc": "735f5f2762ff94b04e70209e46a57202a13a65a8b12a403b620f0896c4fedaa2", "2020-02-16/rust-nightly-x86_64-unknown-freebsd": "69dfcc2b029da84f68c5d543af8262a4735be574a29035d34e452932fcd66643", "2020-02-16/rust-nightly-x86_64-unknown-linux-gnu": "b4f6ce68cc5fde78dc7ab06db6a6f30abde85ba6e5360ea3f75fb8c80232ad38", "2020-02-16/rust-std-nightly-aarch64-unknown-linux-gnu": "ef3c2edd450ef3ef214a5cc412de4527631f9324a28168997233a9ebea6f08c9", "2020-02-16/rust-std-nightly-wasm32-unknown-unknown": "dcc9ce64c62e2100b35194b6a9ed3d9a7572e1bbf28ca09da687af82ff1dbfc9", "2020-02-16/rust-std-nightly-wasm32-wasi": "6e3c13f44ea6e997b5fb0a3818ee8cb850c9654857a438f6e8df42a6e1decf75", "2020-02-16/rust-std-nightly-x86_64-apple-darwin": "d391be4bdb713356fb34cdc03475a830e6bd4476639c46ef19a8a4c05513bc4a", "2020-02-16/rust-std-nightly-x86_64-pc-windows-msvc": "5881bc3954fe5c7a8080176aac4fae95bc079d020c9c68f9fc7d1064470c4493", "2020-02-16/rust-std-nightly-x86_64-unknown-freebsd": "6575eabdfaed4b0490cdfffcbb5860036dcc36bebdabc58d839c088ff5556a6f", "2020-02-16/rust-std-nightly-x86_64-unknown-linux-gnu": "28a169e9b0f0986a50254caf14be863cf6f1ed3aec8342a7fa756dc1af76f38b", "2020-02-16/rustc-nightly-aarch64-unknown-linux-gnu": "e9cf265820f69331abc9a7c4da0c26febffd4017cf4e6d0840d4ed22b3dd332b", "2020-02-16/rustc-nightly-x86_64-apple-darwin": "db0338b3e1934147dce0bf6420d9c147caa6aef2db1aca44ca8fef47b7247615", "2020-02-16/rustc-nightly-x86_64-pc-windows-msvc": "d51440d4004e49670c5cf803f96aa222c68f09348bfca46f6e0d4c8728908065", "2020-02-16/rustc-nightly-x86_64-unknown-freebsd": "c76fa125e6d17b16a96b01a875d826f20849b09970b49ed1183601a0e7803f6f", "2020-02-16/rustc-nightly-x86_64-unknown-linux-gnu": "456af585ad4408ab5f0c7500264ebb4a5f6338c0aed642edb81224ec6146b546", "2020-02-16/rustfmt-nightly-aarch64-unknown-linux-gnu": "76b5fb48db5de274950f86a9b1cb69738311d2302da3e079ec772302aacfd999", 
"2020-02-16/rustfmt-nightly-x86_64-apple-darwin": "77b467fec83ea6d8f2b4e4e186806d77ae7ecfab1de618f4a7d857aaa7f6823f", "2020-02-16/rustfmt-nightly-x86_64-pc-windows-msvc": "eee4d08ac820d85491a9f13909f178dbdcc54edc5d98d2e433a073c6b1aa611a", "2020-02-16/rustfmt-nightly-x86_64-unknown-freebsd": "a47062919f16888d2baa58a640299a5a9ece3f0d6537dea6e6241ac0d8877e7c", "2020-02-16/rustfmt-nightly-x86_64-unknown-linux-gnu": "65513b8ca698f6859af19be665bead97271e7dbac3bc6058256ede1d7340aea5", "2020-11-10/llvm-tools-nightly-aarch64-apple-darwin": "e2b1803548aeedb1e6b51724c9cbab123626fc88846eae53adf0a1c55d4a364e", "2020-11-10/llvm-tools-nightly-aarch64-unknown-linux-gnu": "442255a2859c2e3345c8ceea7a28359fb02d42460ecf51d92395c1dd85c9a8af", "2020-11-10/llvm-tools-nightly-x86_64-apple-darwin": "9ae35e98f8f930257bb103ae1cffda42476338838f490a62ba9d93638ce122ec", "2020-11-10/llvm-tools-nightly-x86_64-pc-windows-msvc": "4106227bf29d1dbaa35ddc12f8a2c2f16ef27fe21971f6f7ed0d4356691a4055", "2020-11-10/llvm-tools-nightly-x86_64-unknown-freebsd": "91dbd775c36f8b29bd688a1e75b10ab065928622985aee7e96848952ab6d85d9", "2020-11-10/llvm-tools-nightly-x86_64-unknown-linux-gnu": "532a0883b16bbaa70bb2e9ba6c769594db35b1aaacbfa9ef06631a91bfe8048a", "2020-11-10/rust-nightly-aarch64-apple-darwin": "bcb30524c7f4520bda573d31962ce5b058cd9b6d05db83431b182483071fb429", "2020-11-10/rust-nightly-aarch64-unknown-linux-gnu": "7d6453eaf2640a9979707e6ef92b8dcfed33bd7bf5a7696d8efdab05bed182d0", "2020-11-10/rust-nightly-x86_64-apple-darwin": "106395f200ef0e6d08baad05d5da786dd17c612d25ba5d7c65a7031d52af9bd5", "2020-11-10/rust-nightly-x86_64-pc-windows-msvc": "6a28970950157102e0ea6799da0235483cad141b2cf112718abcaa19ed81170e", "2020-11-10/rust-nightly-x86_64-unknown-freebsd": "dce8b0971da6c265190d0c14cee3e4d82ad24ca224398ef3002a870f3db31fce", "2020-11-10/rust-nightly-x86_64-unknown-linux-gnu": "29696ffa840261dec1a27018054599a93f49facfa6813f7ad1a875cfb1fc6fd7", "2020-11-10/rust-std-nightly-aarch64-apple-darwin": 
"80e57cd44992e0a9a29bb0233a4c7301369c7f00e9a63f89e944c5fd75931d40", "2020-11-10/rust-std-nightly-aarch64-unknown-linux-gnu": "a4d0fa574f93e530f421651ec38f5374fdc8be20de717c435750bfcf0ae15f36", "2020-11-10/rust-std-nightly-wasm32-unknown-unknown": "636cb560095c23e12d629ea21dc85af954c2fcb2df57f25b40f11826d7547a46", "2020-11-10/rust-std-nightly-wasm32-wasi": "baf705571736331dd5449e1473b590477e2a48ad0adc6a897516fb8f1a5780fd", "2020-11-10/rust-std-nightly-x86_64-apple-darwin": "2b0d1758c20fea48e8afa5c9cc2844e9eb5c77376992f2af1e68261e1b0bd773", "2020-11-10/rust-std-nightly-x86_64-pc-windows-msvc": "9e5f1089de87e9d54417038f7d5d30de5e604bf82f5be557361cd02b55abb018", "2020-11-10/rust-std-nightly-x86_64-unknown-freebsd": "24a506f85e178be0799d12f9354c6129004068552c8f5321a519b033631b815d", "2020-11-10/rust-std-nightly-x86_64-unknown-linux-gnu": "367c14b7fbe98e264b0e4b5a9ddaf3f78ce3ce09bbef4c7be33a3f2abade9ad9", "2020-11-10/rustc-nightly-aarch64-apple-darwin": "a461f2486013b5cec450c8f79230e83878689b803a38df7304adea27b025ef1b", "2020-11-10/rustc-nightly-aarch64-unknown-linux-gnu": "900170006c4c2d88cadc0d915d410588cb80150817e53aa7fca41a459a5ec500", "2020-11-10/rustc-nightly-x86_64-apple-darwin": "7a443dfb068bb7e3854dd6475564da33a57d3f225ce03ad8bc973e8900960b69", "2020-11-10/rustc-nightly-x86_64-pc-windows-msvc": "e7b325e55d372aaf4be400273673711fe78271b655c0b710d62a972b8044b9ef", "2020-11-10/rustc-nightly-x86_64-unknown-freebsd": "3ef55f82aefad5eac4398977d34b1963feb05b1cd654005d385da26624cb2f7e", "2020-11-10/rustc-nightly-x86_64-unknown-linux-gnu": "7498af27587f4ff235b0477199eec4128a65f54d4c05e4ddb9c632685ec526b4", "2020-11-10/rustfmt-nightly-aarch64-apple-darwin": "e4e992764d26792ba901fe4c9590cbb7a72a8a71b524f54ae7b1312d2824bca4", "2020-11-10/rustfmt-nightly-aarch64-unknown-linux-gnu": "7ec62c4aaa8a89f94e037c39907caaea942c9fb44e5dbadd65be7b9c8650c594", "2020-11-10/rustfmt-nightly-x86_64-apple-darwin": "c169fbd9b21ddff9e7558f8674755410d170aea6521cccadd06a14d1091870c3", 
"2020-11-10/rustfmt-nightly-x86_64-pc-windows-msvc": "d3fa6a30d2be44c636478c0259337c6f449a55bd9f037cbe0600a18da143c2e5", "2020-11-10/rustfmt-nightly-x86_64-unknown-freebsd": "3681fa62a68c50d0de839dfe424e30ae72d8635e15267042191bb10195d265fb", "2020-11-10/rustfmt-nightly-x86_64-unknown-linux-gnu": "75e17c1e4bcfa70669aefda8ba34a7e8d6e0f5d842096b98135f3447b37d3538", "2020-12-30/llvm-tools-beta-aarch64-apple-darwin": "d6e3e50a19aa45863ba5e37f316bf928f6eca96c3fa749b9ba87cddb3608a659", "2020-12-30/llvm-tools-beta-aarch64-unknown-linux-gnu": "e1f3d55116386fbecdcaacb879dc19a62a9c9bb0d06581a366a53e84a5bc4d8e", "2020-12-30/llvm-tools-beta-x86_64-apple-darwin": "b5a1a1b3d2d316e8d66876736462b6c8b08951e7fbfa3568da19caeb976e9fa9", "2020-12-30/llvm-tools-beta-x86_64-pc-windows-msvc": "a10198ef08e9e58bb1a54ec23368d5df02c87b6618e16ab026afcc5c8f9cef6e", "2020-12-30/llvm-tools-beta-x86_64-unknown-freebsd": "79df94c33935de84d0cd0e985c333c2516551a9fab3c8fb7c5b93cf3b0d0e22e", "2020-12-30/llvm-tools-beta-x86_64-unknown-linux-gnu": "6e4d8501fc7c5c69b4a5b532021a0e39e125b6fedc12b1afe4ba22d07e0b995e", "2020-12-30/llvm-tools-nightly-aarch64-apple-darwin": "8e2f796ace0270fd2fde8bffe4db90fa1b09947032ee705bd99a1628a1138b95", "2020-12-30/llvm-tools-nightly-aarch64-unknown-linux-gnu": "3f59253b666c05faef7a3b9b1b761ac6ae4f83833996495936629f41fc4c6959", "2020-12-30/llvm-tools-nightly-x86_64-apple-darwin": "8aca7ddf73983bf2db4846721787547fed16c2ad4dc5c260f7f05f6b93cea8e7", "2020-12-30/llvm-tools-nightly-x86_64-pc-windows-msvc": "f30202ade378ade7a1a4bf23381ae69525154ce009aa54b9d59d6507000bf301", "2020-12-30/llvm-tools-nightly-x86_64-unknown-freebsd": "7f1837f6c8e26232cad456df8ce9104cbc6eea8a57e3290a8c72286c0e9fe803", "2020-12-30/llvm-tools-nightly-x86_64-unknown-linux-gnu": "b1ee3b5cafd026432c74ab9eda4f797d1aa55d06a38438f84f29be528887e540", "2020-12-30/rust-beta-aarch64-apple-darwin": "eb202137d6801bf50c3edc06b5bf16cd5215c66d24790a1168d22fb3a504adf9", "2020-12-30/rust-beta-aarch64-unknown-linux-gnu": 
"c1d80d58cd0eb74f3db650285c808d18fb0a195fe7ad6461c38de098ff94fc77", "2020-12-30/rust-beta-x86_64-apple-darwin": "ed1b3b8f5fe4e73ddef62a54ec41dada5fb2cd2519f5c5add06be6ea57c38d49", "2020-12-30/rust-beta-x86_64-pc-windows-msvc": "0192de1b6cb415683e231caf3817127230828e6256150cf0a0c8f393cec50650", "2020-12-30/rust-beta-x86_64-unknown-freebsd": "23515e664a0a87bd217bbcdec8785f52485ad8f74c7ff84b6949c0a16f09be1b", "2020-12-30/rust-beta-x86_64-unknown-linux-gnu": "5ebe5cdf55eb79dad2c34fc770c5a35a6be2ef2a72865db291932ca193467b6d", "2020-12-30/rust-nightly-aarch64-apple-darwin": "4cc5ef6dc2e7524da659e416b68b353f61576aeefccc33c0f2564699d5d0cf91", "2020-12-30/rust-nightly-aarch64-unknown-linux-gnu": "1539f5181c1993abb7a43b14dd1294d88453e48f8670c4574e0b5e98e6df28fe", "2020-12-30/rust-nightly-x86_64-apple-darwin": "2b5b885694d0d1a9bdd0473d9e2df1f2c6eac88986e3135e6573e1d71e7824dc", "2020-12-30/rust-nightly-x86_64-pc-windows-msvc": "2c9086371cee98ce95cf10098cd655b2e33dd70e8e250759a1e8b0e8c42d659e", "2020-12-30/rust-nightly-x86_64-unknown-freebsd": "79e5492d9a5f9f04ec5080be1fe305a3d7adde330f5c3fb9d7a3bae52720a027", "2020-12-30/rust-nightly-x86_64-unknown-linux-gnu": "1a6b541f2d0ccda148a60d749e974cc545d9765b71d8dec59418b493f05209a2", "2020-12-30/rust-std-beta-aarch64-apple-darwin": "0065919e445e3eaa8d70561b949697b8e3af9beea62989c9ffc60856d46a9da3", "2020-12-30/rust-std-beta-aarch64-unknown-linux-gnu": "426bd1cc7a0e94af5decd643d08c54fe9aab29e638cd79aca21ccb05ec00eaf8", "2020-12-30/rust-std-beta-wasm32-unknown-unknown": "3cf97eba1da6d14160e82de4c0302883fb2eb9c65151dd2a148c57cba430f5ec", "2020-12-30/rust-std-beta-wasm32-wasi": "e86f3f58cc04bf4c4f9d94ac11e7244510a35f89795298658de2153a7fa60f86", "2020-12-30/rust-std-beta-x86_64-apple-darwin": "12f5a181b6102f75e85b71259283d852777940cf82d1681fb19005b589076a83", "2020-12-30/rust-std-beta-x86_64-pc-windows-msvc": "a02da2dbd7f3d14e4a2083a497aa7aa884b99e6ea941059102278dd2325c5b61", "2020-12-30/rust-std-beta-x86_64-unknown-freebsd": 
"379f353e27b8218ed6bb54f1ef16314624705e9b28f5cd6047bc25259aeb0bf6", "2020-12-30/rust-std-beta-x86_64-unknown-linux-gnu": "6929f00e1cb93b16bd2e3d76029b297f099818183ac2d7ff23eb532d4c31ebb6", "2020-12-30/rust-std-nightly-aarch64-apple-darwin": "a01bcc6eb93b1883bc57739959d6f9e13fbb80e1867310272cdb1a1de496cf73", "2020-12-30/rust-std-nightly-aarch64-unknown-linux-gnu": "3997bd9e5057851b9e49ebcba5886ca98bf736c062c122e677fcf40aa7ac5416", "2020-12-30/rust-std-nightly-wasm32-unknown-unknown": "b1669be863b7f419254382e9e3820e9ef0d69c60fa45f91d0625140229725484", "2020-12-30/rust-std-nightly-wasm32-wasi": "8f2b0a30cdf50d748e57d23d94d54a4e175e864296c8048c8454bb6198b16fb0", "2020-12-30/rust-std-nightly-x86_64-apple-darwin": "17912a6a5aa56daeb0aed5fca8698bacc54950351d9f91989a524588e37e41ca", "2020-12-30/rust-std-nightly-x86_64-pc-windows-msvc": "4d2585cb12606f217150509971850330cc1b7f3e1a9c18ce03fd3b981021fa1f", "2020-12-30/rust-std-nightly-x86_64-unknown-freebsd": "ac517f0ccc4b30f3a296a25a8b17f75f877052cd56ae5c5a043d88c0f5de972b", "2020-12-30/rust-std-nightly-x86_64-unknown-linux-gnu": "5b1bd5fa31b9190c66b3446629c155d4896cffc8fb1f9f603a2e949162b7f791", "2020-12-30/rustc-beta-aarch64-apple-darwin": "20107f4541be8822d428c402e010333f2f00aaf086d03b4e35ce8d1bd5c33d5a", "2020-12-30/rustc-beta-aarch64-unknown-linux-gnu": "29e2808cadf8da481a0ace30bf107372dd108b0475706cbe2b9cdd4ff27e2315", "2020-12-30/rustc-beta-x86_64-apple-darwin": "d65df5791d79e13037672d22055ec24583195554cdf7c3c2992cbcafa497e98f", "2020-12-30/rustc-beta-x86_64-pc-windows-msvc": "7f934dd412207c0d776fb4e8ec4c5f4426e92b2a1854416a8ce7bbc2dc7f5908", "2020-12-30/rustc-beta-x86_64-unknown-freebsd": "19818ab53bbd94c6d1723a52809bf1c3a271e258664ea2b3b7d00161965e058c", "2020-12-30/rustc-beta-x86_64-unknown-linux-gnu": "4f5ac3311c913b79dca1a02cf42fd7326c63d53ee252447b61f113c043a82b5f", "2020-12-30/rustc-nightly-aarch64-apple-darwin": "4610961ab77e1bb54bda95474b1c1f25f1fc5c1c103bc4f54758e5b2a5454d8b", 
"2020-12-30/rustc-nightly-aarch64-unknown-linux-gnu": "c9997c01769a6371200e20639fcae99e6dec3d9062f65b2928429e04d4cb7930", "2020-12-30/rustc-nightly-x86_64-apple-darwin": "cf2f06d6c8d784a469561f6323b8b923fb6ad3a7c55c7ac90d5619b9d443ae9f", "2020-12-30/rustc-nightly-x86_64-pc-windows-msvc": "8df3729f3b09cb39fc4b0ecfd90551625941d508f7e776ae4e16fcf02b0af4f3", "2020-12-30/rustc-nightly-x86_64-unknown-freebsd": "f3818645265c3a08cb9fa04d1c2d42be72116974c9c34515feb7d5788e86ac41", "2020-12-30/rustc-nightly-x86_64-unknown-linux-gnu": "79fc8d51bb6d298d292045eb77e1b2d0f7f97886604599a3e9dfc0c6956e49d7", "2020-12-30/rustfmt-beta-aarch64-apple-darwin": "75c957da65d459a02f29affd8fac867e14eb8eec98531fd2216ebcb54a5b6407", "2020-12-30/rustfmt-beta-aarch64-unknown-linux-gnu": "b34fd3a8e80969df9ba71ef5b80f143b4e8f325a91d98b00db1ea86879074b22", "2020-12-30/rustfmt-beta-x86_64-apple-darwin": "bfe5ef2349a226fe54d87658f55dad90b99ee6e36de4f5d1e381b1ca453e1919", "2020-12-30/rustfmt-beta-x86_64-pc-windows-msvc": "ebac84095df62d8ed6b41454c07f043477479a1770cf156a5c9f351bebcbe6a4", "2020-12-30/rustfmt-beta-x86_64-unknown-freebsd": "bfe8a34403fb19d88eb2b4528b3836a645239d239bf56cc9b916aefebd0199a1", "2020-12-30/rustfmt-beta-x86_64-unknown-linux-gnu": "78ac7e3178068c6828765c295698cb79375266cec95b097c4603f8582bd24379", "2020-12-30/rustfmt-nightly-aarch64-apple-darwin": "0c3cfa89787cc9fcdee39acc0b5c5cf3ef084d85fb0e2716926813852fb96a3f", "2020-12-30/rustfmt-nightly-aarch64-unknown-linux-gnu": "5fb27d5f31411c242a8046de087b3dbd73e5829d7e07493858034139681c30c7", "2020-12-30/rustfmt-nightly-x86_64-apple-darwin": "c7da578f3b70dbfa0ae1f06370c7c9f22a49127fed8a99b69dc9ac6e42491bb0", "2020-12-30/rustfmt-nightly-x86_64-pc-windows-msvc": "ebae20dd198a36b657aa0486e6b557aba60c9b4fbff25c108246de312fd2963f", "2020-12-30/rustfmt-nightly-x86_64-unknown-freebsd": "2e0f2e4adcc234d29859aa38088a02be2b2bb0a7e43863bca6d436a6712b8b3b", "2020-12-30/rustfmt-nightly-x86_64-unknown-linux-gnu": 
"0fb77ae8a33fb83ea496654a52e55ab5245206322f09c1d396e0c5833a16b856", "llvm-tools-1.36.0-aarch64-unknown-linux-gnu": "942856e49837a1c3b9c7d48b52cf0ac0fcb2bb31bb691fe53bfb934afb561c7f", "llvm-tools-1.36.0-x86_64-apple-darwin": "ed702a4174a27fcf118f301e79835c3da205d3d98adb4acc294b72293a2ec790", "llvm-tools-1.36.0-x86_64-pc-windows-msvc": "cf72242bcf873227c026505f56f3ffdaa2febde828d67ad7fc04c4a2e72d7587", "llvm-tools-1.36.0-x86_64-unknown-freebsd": "37c19db740acbe462d878fe193b59653a5073b23a840c6a2e2924772c0642b56", "llvm-tools-1.36.0-x86_64-unknown-linux-gnu": "beae1690418b4adffac166fbfde525be8f5e2b2ce220ffd19b420edb1efa4477", "llvm-tools-1.37.0-aarch64-unknown-linux-gnu": "fb7cea148816422466aee656d81b08f9cb819cff8c431574f08c281b58547413", "llvm-tools-1.37.0-x86_64-apple-darwin": "b882607b0f181d3942eb00a13cb375d820d000ced456a0cfd626ad79f597f8ac", "llvm-tools-1.37.0-x86_64-pc-windows-msvc": "804a1455879b72f9439e9f2d6469f328847ccb432f69b41ccbad2ecc0e124fb2", "llvm-tools-1.37.0-x86_64-unknown-freebsd": "206bf31dc2851a27b697acd5ad978d2b0d1cfdf26e01b7798388030591fa7899", "llvm-tools-1.37.0-x86_64-unknown-linux-gnu": "da54ade6c7e2776edab1b6f1216477168cadf30fe40e503cca8b4bce20d89bc6", "llvm-tools-1.38.0-aarch64-unknown-linux-gnu": "dbbfdc0dd802feb94e8e0f0eb0dad2c2f3e6bf69bb58d371622c94e8c7e82e25", "llvm-tools-1.38.0-x86_64-apple-darwin": "7a4f8502b93e6fc3a4d89ab94230a90c94778d17badcdde25ebb545f4e37a7c0", "llvm-tools-1.38.0-x86_64-pc-windows-msvc": "da005a040ee70728c224eb23d1374420422ac64e2b4ba328ac6d7b5934389061", "llvm-tools-1.38.0-x86_64-unknown-freebsd": "f4da25e84e31a78b6f761b3f597c98391bd6873298c7708dc886b2c72f56f874", "llvm-tools-1.38.0-x86_64-unknown-linux-gnu": "0fff5bc69ebf49fec0372aa73f9b6757b8a6bb506f14f48d153e6f14de2fd19a", "llvm-tools-1.39.0-aarch64-unknown-linux-gnu": "9c7eae2e5770d20872f6012b273d2ca5dab09f97f497a0cc82ea5af8e2b08527", "llvm-tools-1.39.0-x86_64-apple-darwin": "52c15480345a18d55a2141a9f440fe874a8686d3d94e4637b2c4884df7c88a43", 
"llvm-tools-1.39.0-x86_64-pc-windows-msvc": "87b7cf10ebab53bb7fab625d603f80e35111afaeacd915df63c19ad68382f31f", "llvm-tools-1.39.0-x86_64-unknown-freebsd": "f451ffb87b00a277264c5acf5267f8df61300089a9798607b4cdfebc88fabee1", "llvm-tools-1.39.0-x86_64-unknown-linux-gnu": "0a87b543e3841d415887a4543587b783fce678a7097a774a56a2032cee842991", "llvm-tools-1.40.0-aarch64-unknown-linux-gnu": "caf36148d0f5a885cad05605d80cc2c805ce8456837b6dbb34b47420a4d52475", "llvm-tools-1.40.0-x86_64-apple-darwin": "d4c4abb2a7b2800500ef4e0a46493c5340bb7b0be84d38897573281e93b8577f", "llvm-tools-1.40.0-x86_64-pc-windows-msvc": "0fe4cfa0e4ce99e45c810b8301edcdfa694db75e291b497ce8c52ec5b89e4861", "llvm-tools-1.40.0-x86_64-unknown-freebsd": "aa11c881fa728fa8df233c220fecd6b25cb27cbb673569cbd9a90865ae464d9f", "llvm-tools-1.40.0-x86_64-unknown-linux-gnu": "40c5ad2c53802b8b722ebd5a06b9f51f32644d8a6d6fdc32aacc60a33bed5839", "llvm-tools-1.41.0-aarch64-unknown-linux-gnu": "279aedca8c3c12a0608de9a51fa38a33b910600e4f980487c1706cec29270c63", "llvm-tools-1.41.0-x86_64-apple-darwin": "621676b4ae3d75662463876315a58bd188ceb4b22ff249ad033e0181fe30df74", "llvm-tools-1.41.0-x86_64-pc-windows-msvc": "6d1b3a2a74497b0a4e9420d87a6fa462dc608a3b41d4dae9f164cf66c290a00d", "llvm-tools-1.41.0-x86_64-unknown-freebsd": "311a056371edbad2194b5714f3e8d17e7a897f27b67bdbe2d827ed437d06d050", "llvm-tools-1.41.0-x86_64-unknown-linux-gnu": "d2cfa10a162cd9b63c5b8eb3db49560532c11823bb15f836abc5e42cca1a1170", "llvm-tools-1.42.0-aarch64-unknown-linux-gnu": "7601ef92b42a321fee08f6adce3ca0eb612ca8703fda1db63e30bd4952f7fcc9", "llvm-tools-1.42.0-x86_64-apple-darwin": "c4c0319e8be687b104162ce3654249ed76040229a77d77016e32570fbfbd3439", "llvm-tools-1.42.0-x86_64-pc-windows-msvc": "721b14d159d6df877991db62a0f5fcd11d8d9cd642d8f51311a2d2c99c0f9e43", "llvm-tools-1.42.0-x86_64-unknown-freebsd": "4fc2bb1ab454b21750c78f9ce19d7138e4929a804770202319a3f457b1e5c2f9", "llvm-tools-1.42.0-x86_64-unknown-linux-gnu": 
"d306ee9009eeab2062b813123628cc440f58c71c0e1d53afe1563f4eb1a5e0e4", "llvm-tools-1.43.0-aarch64-unknown-linux-gnu": "647dc36be8dc5130a703f6ba151bc79936503d0251481ba40bfacc5bfa251947", "llvm-tools-1.43.0-x86_64-apple-darwin": "890bf12d80b72fc0c58966e1d229cfb24764eabe356762dcaf126afbd63fd47d", "llvm-tools-1.43.0-x86_64-pc-windows-msvc": "e299dea627f89f6b14897d45f39dba3036298b2c94f35ba4dfea276996682977", "llvm-tools-1.43.0-x86_64-unknown-freebsd": "6a64cc4b3dd0b8218b350b4fad36197edf2da33e5ab43c4670737e4d392ba586", "llvm-tools-1.43.0-x86_64-unknown-linux-gnu": "4f62cab67e89d78d886cb03379d71f6722f8c5e5c069b3c243e334381c5948cf", "llvm-tools-1.44.0-aarch64-unknown-linux-gnu": "e25ee71a187d6c8969b17788fb678c9b358034ad2a2fb7557b755534eaf9cfa6", "llvm-tools-1.44.0-x86_64-apple-darwin": "d684de7783ee15537f78231acacb9079f821c8c8b85b889e54c40b095ae6b0a1", "llvm-tools-1.44.0-x86_64-pc-windows-msvc": "ac84fcc25d5d8d20592d6491576df7a72059fe9317889692badac2fc9028bd8a", "llvm-tools-1.44.0-x86_64-unknown-freebsd": "3f4a17239b9dc9e84d98922ea4725f741249ba597ac1345b09c818b54b7a0765", "llvm-tools-1.44.0-x86_64-unknown-linux-gnu": "1755b589718c652071e354c3629f41a9a90a84a3649078ed697e630ba19b3592", "llvm-tools-1.45.0-aarch64-unknown-linux-gnu": "1432bf52b301e16a5a57398a7f59bcee43358913627c7caf7b1568cd8824c5c4", "llvm-tools-1.45.0-x86_64-apple-darwin": "9c4e5488be910b8b5ded830ea4c8844090801d3f35e7d9cb1f272e3e7df90a0d", "llvm-tools-1.45.0-x86_64-pc-windows-msvc": "5a2b5f49e04def6bc6bdb148412bf62ca7fd01d0e8ed61d07fe6716003425350", "llvm-tools-1.45.0-x86_64-unknown-freebsd": "dbeaa09b90aab06a8450afaa9018a859a440be48d98e9437a7d827a138d3ae7a", "llvm-tools-1.45.0-x86_64-unknown-linux-gnu": "54a2ac31ad53d3d346c571fa1d25b730b614a8214b5484c511f21f7dd0bdbd5f", "llvm-tools-1.46.0-aarch64-unknown-linux-gnu": "1d8107ff0682d20c37a0d42f54fa1e2e96e70f7c4694fc71a84f7b32e3793247", "llvm-tools-1.46.0-x86_64-apple-darwin": "1045f55a6e59326e0f5b46616e8c945f0cc04c4519f21aa095f87b3e35420422", 
"llvm-tools-1.46.0-x86_64-pc-windows-msvc": "037719e7774bae1e3084949123a8a10d4d2c89134849333a53c7dcad00fe412e", "llvm-tools-1.46.0-x86_64-unknown-freebsd": "55d9194cd9ac3f26f95f4f94db899c86b140753ef57aa2996dd8be528eaf8ae0", "llvm-tools-1.46.0-x86_64-unknown-linux-gnu": "2a98e7290148575cdc6230610fca3ce68d1bd7b7dd105124f8a1673859ecc9ad", "llvm-tools-1.47.0-aarch64-unknown-linux-gnu": "6f9cc27ea4d33ef81be176392d169a2ca2ba6d3e6e8c037917133823cc4979c1", "llvm-tools-1.47.0-x86_64-apple-darwin": "75a8381f7f521ad8afc8480e2bda27d3d3730b9ee154022deb26db3ca6216505", "llvm-tools-1.47.0-x86_64-pc-windows-msvc": "e9a5d9db6f899904f094cc745a1b5cc47f7d7bbcb708217ad68933316e814880", "llvm-tools-1.47.0-x86_64-unknown-freebsd": "47592da88536cf5c44085907c7e5d57bf695d5ac8add76d2c7d1c0518e6e05e6", "llvm-tools-1.47.0-x86_64-unknown-linux-gnu": "a52c3cd18a6895c91a49d0a00f2cb4b12d64dd5b1ef6607fade1fed88fc36dac", "llvm-tools-1.48.0-aarch64-unknown-linux-gnu": "133e6b94d3c34d91ea9689c9288c66acf169d59877c0c924fc99b1fee283f4f4", "llvm-tools-1.48.0-x86_64-apple-darwin": "de0715d6cb0456da647750605ea1a3e3832278a4fa500d9c13bd148e7b278afe", "llvm-tools-1.48.0-x86_64-pc-windows-msvc": "a0506c1619708e2bdf6bc198db5d130965613ec0609a9fe75556ce5effdf4f78", "llvm-tools-1.48.0-x86_64-unknown-freebsd": "61a56f1436c7e4bfe68be160abb61989a8b4b4fef5e939764d488587484d6da3", "llvm-tools-1.48.0-x86_64-unknown-linux-gnu": "a4932dafdc84a2c2f4f67a9aa207ce306c36a4ed8e682e6d79764d438ebd00b8", "llvm-tools-1.49.0-aarch64-apple-darwin": "78f666e9608c6b38f704447ef270170154c55dcda033e4fab00c42bebc3319a5", "llvm-tools-1.49.0-aarch64-unknown-linux-gnu": "50228dd0c1ea9f483cac055fd1ff82f202427ef970266e904be01133c40f0c91", "llvm-tools-1.49.0-x86_64-apple-darwin": "39c294fb87e6dc8c29975469a0566d4f8a47e50c1defe9f3dabbf1d598772bea", "llvm-tools-1.49.0-x86_64-pc-windows-msvc": "3e57ff66c2a0091e3373e479fec699d3012e9249b7e0da36500fa0071308114f", "llvm-tools-1.49.0-x86_64-unknown-freebsd": 
"c2b3c06bf4b2f6010f9927391c8e72f96642a528c486ec98f66c16066298e015", "llvm-tools-1.49.0-x86_64-unknown-linux-gnu": "aecf6c322dc4064dcedf2315d443a69e099fc52e617711306fa1269cb180aa68", "rust-1.26.0-aarch64-unknown-linux-gnu": "e12dc84bdb569cdb382268a5fe6ae6a8e2e53810cb890ec3a7133c20ba8451ac", "rust-1.26.0-x86_64-apple-darwin": "38708803c3096b8f101d1919ee2d7e723b0adf1bc1bb986b060973b57d8c7c28", "rust-1.26.0-x86_64-pc-windows-msvc": "20631bf942242d4be82363030839851bf18a2199b74a661bdc334f830e9e1d5a", "rust-1.26.0-x86_64-unknown-freebsd": "a03cbe097670042c90d18654fbc852c9d473261d61c03d0f745bbaee759780ed", "rust-1.26.0-x86_64-unknown-linux-gnu": "13691d7782577fc9f110924b26603ade1990de0b691a3ce2dc324b4a72a64a68", "rust-1.26.1-aarch64-unknown-linux-gnu": "d4a369053c2dfd5f457de6853557dab563944579fa4bb55bc919bacf259bff6d", "rust-1.26.1-x86_64-apple-darwin": "ebf898b9fa7e2aafc53682a41f18af5ca6660ebe82dd78f28cd9799fe4dc189a", "rust-1.26.1-x86_64-pc-windows-msvc": "56c2398de358094606afba419c1e1a9e499cbe6f894315e99cfebda9f765c52f", "rust-1.26.1-x86_64-unknown-freebsd": "910128f60c680e175ae93722272f491c6835f27652f9f3fe415dc0d9c482e204", "rust-1.26.1-x86_64-unknown-linux-gnu": "b7e964bace1286696d511c287b945f3ece476ba77a231f0c31f1867dfa5080e0", "rust-1.26.2-aarch64-unknown-linux-gnu": "3dfad0dc9c795f7ee54c2099c9b7edf06b942adbbf02e9ed9e5d4b5e3f1f3759", "rust-1.26.2-x86_64-apple-darwin": "f193705d4c0572a358670dbacbf0ffadcd04b3989728b442f4680fa1e065fa72", "rust-1.26.2-x86_64-pc-windows-msvc": "c4195cc0541db7cb08d503cc38917f6f40f53826001e86d613a48bd7387ac6a0", "rust-1.26.2-x86_64-unknown-freebsd": "0ad985cf36b3946f086fd3c3c6eb97b0c94b24285147a04da22c00d4d522727a", "rust-1.26.2-x86_64-unknown-linux-gnu": "d2b4fb0c544874a73c463993bde122f031c34897bb1eeb653d2ba2b336db83e6", "rust-1.27.0-aarch64-unknown-linux-gnu": "e74ebc33dc3fc19e501a677a87b619746efdba2901949a0319176352f556673a", "rust-1.27.0-x86_64-apple-darwin": "a1d48190992e01aac1a181bce490c80cb2c1421724b4ff0e2fb7e224a958ce0f", 
"rust-1.27.0-x86_64-pc-windows-msvc": "795585a4f49dfcfd719dd6678713d0e84979b265ae9265dcb26b45c67b3a883a", "rust-1.27.0-x86_64-unknown-freebsd": "f0754434f76f261ecdfd7ea3645b251b0188e263c0c7a7466aafac1b034d20ec", "rust-1.27.0-x86_64-unknown-linux-gnu": "235ad78e220b10a2d0267aea1e2c0f19ef5eaaff53ad6ff8b12c1d4370dec9a3", "rust-1.27.1-aarch64-unknown-linux-gnu": "d1146b240e6f628224c3a67e3aae2a57e6c25d544115e5ece9ce91861ec92b3a", "rust-1.27.1-x86_64-apple-darwin": "475be237962d6aef1038a2faada26fda1e0eaea5d71d6950229a027a9c2bfe08", "rust-1.27.1-x86_64-pc-windows-msvc": "24fb59a42277487ab1aaf8ac8b7a988843ae851ffe4a3386d9339e99e42d08d0", "rust-1.27.1-x86_64-unknown-freebsd": "739d38036c9f08c13bc7425cc5cccd3dd37860fa6e9dfc7bcd9081c8d3c5ccdd", "rust-1.27.1-x86_64-unknown-linux-gnu": "435778a837af764da2a7a7fb4d386b7b78516c7dfc732d892858e9a8a539989b", "rust-1.27.2-aarch64-unknown-linux-gnu": "cf84da70269c0e50bb3cc3d248bae1ffcd70ee69dc5a4e3513b54fefc6685fb4", "rust-1.27.2-x86_64-apple-darwin": "30c5cc58759caa4efdf2ea7d8438633139c98bee3408beb29ceb26985f3f5f70", "rust-1.27.2-x86_64-pc-windows-msvc": "be1cccbd4cc00d473cb19fee4402d0ffde3b1e3ca3701926d47590878bc88508", "rust-1.27.2-x86_64-unknown-freebsd": "b114c5eebc120b360d4d3c4360421ff181cc47bb311e161d3af6971b6d3e6244", "rust-1.27.2-x86_64-unknown-linux-gnu": "5028a18e913ef3eb53e8d8119d2cc0594442725e055a9361012f8e26f754f2bf", "rust-1.28.0-aarch64-unknown-linux-gnu": "9b6fbcee73070332c811c0ddff399fa31965bec62ef258656c0c90354f6231c1", "rust-1.28.0-x86_64-apple-darwin": "5d7a70ed4701fe9410041c1eea025c95cad97e5b3d8acc46426f9ac4f9f02393", "rust-1.28.0-x86_64-pc-windows-msvc": "5990e79259967a6a176aa5e4c55c6395f0c9262eed61ea858cfb909bac477542", "rust-1.28.0-x86_64-unknown-freebsd": "cac701973239cbec802780855b172a3cc85ce15602e72873fe966d9d7d807e07", "rust-1.28.0-x86_64-unknown-linux-gnu": "2a1390340db1d24a9498036884e6b2748e9b4b057fc5219694e298bdaa37b810", "rust-1.29.0-aarch64-unknown-linux-gnu": 
"0ed3be0fd9f847afeb4e587fff61f6769ea61b53719d3ea999326284e8975b36", "rust-1.29.0-x86_64-apple-darwin": "28a0473637585742f6d80ccd8afd88b6b400e65d623c33cb892412759444da93", "rust-1.29.0-x86_64-pc-windows-msvc": "64f8c85540520c82d579d7eac5e2a524b42a6083cc46c7e80181512651a66fef", "rust-1.29.0-x86_64-unknown-freebsd": "3500b1683849cbe526bb79f460147aa387b79a4f9a6a4760e276f73ddbffafd5", "rust-1.29.0-x86_64-unknown-linux-gnu": "09f99986c17b1b6b1bfbc9dd8785e0e4693007c5feb67915395d115c1a3aea9d", "rust-1.29.1-aarch64-unknown-linux-gnu": "2685224f67b2ef951e0e8b48829f786cbfed95e19448ba292ac33af719843dbe", "rust-1.29.1-x86_64-apple-darwin": "07b07fbd6fab2390e19550beb8008745a8626cc5e97b72dc659061c1c3b3d008", "rust-1.29.1-x86_64-pc-windows-msvc": "ec15b45be27b4406122518b2949f6186f0d9d422f23a946ab4de43716cc8e492", "rust-1.29.1-x86_64-unknown-freebsd": "4055a9e9990f83f6c0d4f2040b2704edb8dbdaf82933f8598ab4ee31c541bbb9", "rust-1.29.1-x86_64-unknown-linux-gnu": "b36998aea6d58525f25d89f1813b6bfd4cad6ff467e27bd11e761a20dde43745", "rust-1.29.2-aarch64-unknown-linux-gnu": "e11461015ca7106ef8ebf00859842bf4be518ee170226cb8eedaaa666946509f", "rust-1.29.2-x86_64-apple-darwin": "63f54e3013406b39fcb5b84bcf5e8ce85860d0b97a1e156700e467bf5fb5d5f2", "rust-1.29.2-x86_64-pc-windows-msvc": "7813396fb99021e9a8bccb2fc7e71b1b730d5f3aebbb09ffcc2ecb838a1073b4", "rust-1.29.2-x86_64-unknown-freebsd": "2e209d505c730df6e68575424eec03ed924e12114ad60595602cb2513c6a382a", "rust-1.29.2-x86_64-unknown-linux-gnu": "e9809825c546969a9609ff94b2793c9107d7d9bed67d557ed9969e673137e8d8", "rust-1.30.0-aarch64-unknown-linux-gnu": "9690c7c50eba5a8461184ee4138b4c284bad31ccc4aa1f2ddeec58b253e6363e", "rust-1.30.0-x86_64-apple-darwin": "07008d90932712282bc599f1e9a226e97879c758dc1f935e6e2675e45694cc1b", "rust-1.30.0-x86_64-pc-windows-msvc": "960ca17c0c62ee250647c20b617e75912badb67ca8ade08c3224410a7c320ade", "rust-1.30.0-x86_64-unknown-freebsd": "b4e5d00b318d56edb7ba9182af4210fca9d7f44b64bc1380456ff3c17584af52", 
"rust-1.30.0-x86_64-unknown-linux-gnu": "f620e3125cc505c842150bd873c0603432b6cee984cdae8b226cf92c8aa1a80f", "rust-1.30.1-aarch64-unknown-linux-gnu": "6d87d81561285abd6c1987e07b60b2d723936f037c4b46eedcc12e8566fd3874", "rust-1.30.1-x86_64-apple-darwin": "3ba1704a7defe3d9a6f0c1f68792c084da83bcba85e936d597bac0c019914b94", "rust-1.30.1-x86_64-pc-windows-msvc": "b0110a5ad461532b2cce59bc04346af739b4660e7241f92dde6442a11a5391c2", "rust-1.30.1-x86_64-unknown-freebsd": "480db9003f8e8c4ad12f2868af2c1489a05b18a8dcc62985c52310a7a15201ce", "rust-1.30.1-x86_64-unknown-linux-gnu": "a01a493ed8946fc1c15f63e74fc53299b26ebf705938b4d04a388a746dfdbf9e", "rust-1.31.0-aarch64-unknown-linux-gnu": "4e68c70aba58004d9e86c2b4463e88466affee51242349a038b456cf6f4be5c9", "rust-1.31.0-x86_64-apple-darwin": "5d4035e3cecb7df13e728bcff125b52b43b126e91f8311c66b143f353362606f", "rust-1.31.0-x86_64-pc-windows-msvc": "9288248f1821ab53557cbc5728ade7d221b1670547b0c0ec35099e0b2993dcf4", "rust-1.31.0-x86_64-unknown-freebsd": "936ca1503ab1f18d9a4a1cc27fbc655f2c532ba819e1109bb03f5c52c5fb4fdd", "rust-1.31.0-x86_64-unknown-linux-gnu": "c8a2016109ffdc12a488660edc5f30c1643729efc15abe311ebb187437e506bf", "rust-1.31.1-aarch64-unknown-linux-gnu": "29a7c6eb536fefd0ca459e48dfaea006aa8bff8a87aa82a9b7d483487033632a", "rust-1.31.1-x86_64-apple-darwin": "8398b1b303bdf0e7605d08b87070a514a4f588797c6fb3593718cb9cec233ad6", "rust-1.31.1-x86_64-pc-windows-msvc": "4d2aa25c9d79dca5aba67b7b1df1c1f0ad40fcfb25a4c1d364fd64dd17a63cf3", "rust-1.31.1-x86_64-unknown-freebsd": "5cbb465a0843b31da217c51c4f9ebbb2508aa2ece41e9b98303101e12571de42", "rust-1.31.1-x86_64-unknown-linux-gnu": "a64685535d0c457f49a8712a096a5c21564cd66fd2f7da739487f028192ebe3c", "rust-1.32.0-aarch64-unknown-linux-gnu": "60def40961728212da4b3a9767d5a2ddb748400e150a5f8a6d5aa0e1b8ba1cee", "rust-1.32.0-x86_64-apple-darwin": "f0dfba507192f9b5c330b5984ba71d57d434475f3d62bd44a39201e36fa76304", "rust-1.32.0-x86_64-pc-windows-msvc": 
"51b0b64cc843d6e443bf19f89b61addb532ea61e02777c7e80a185a9a263776b", "rust-1.32.0-x86_64-unknown-freebsd": "20d062493d01f1816014fe9dbe883bda06f1828a6ddbfb7ee5e4f1df20eb1c3a", "rust-1.32.0-x86_64-unknown-linux-gnu": "e024698320d76b74daf0e6e71be3681a1e7923122e3ebd03673fcac3ecc23810", "rust-1.33.0-aarch64-unknown-linux-gnu": "a308044e4076b62f637313ea803fa0a8f340b0f1b53136856f2c43afcabe5387", "rust-1.33.0-x86_64-apple-darwin": "864e7c074a0b88e38883c87c169513d072300bb52e1d320a067bd34cf14f66bd", "rust-1.33.0-x86_64-pc-windows-msvc": "b477be7a27799397cf90f09ef5efe21b1af02f48ec9bc1be3306ad298aaf8841", "rust-1.33.0-x86_64-unknown-freebsd": "31ab015c1807a7c231ee74b4fb367f3fa43551d6c49cd2f7b63541f1fef0cc72", "rust-1.33.0-x86_64-unknown-linux-gnu": "6623168b9ee9de79deb0d9274c577d741ea92003768660aca184e04fe774393f", "rust-1.34.0-aarch64-unknown-linux-gnu": "370c3a8fb9a69df36d645a95e622fb59ac5b513baecddde706cedaf20defa269", "rust-1.34.0-x86_64-apple-darwin": "e6bea8d865cc7341c17fa3b8f25f7989e6b04f53e9da24878addc524f3a32664", "rust-1.34.0-x86_64-pc-windows-msvc": "471325ceb9492239f7bb399cb88df230791966c0f76f01020aa9d2868bafcfb5", "rust-1.34.0-x86_64-unknown-freebsd": "bc9048312bee935ae1e7417e2f6840ea76fe370752915ca605ec7dc5b606dba9", "rust-1.34.0-x86_64-unknown-linux-gnu": "170647ed41b497dc937a6b2556700210bc4be187b1735029ef9ccf52e2cb5ab8", "rust-1.35.0-aarch64-unknown-linux-gnu": "31e6da56e67838fd2874211ae896a433badf67c13a7b68481f1d5f7dedcc5952", "rust-1.35.0-x86_64-apple-darwin": "ac14b1c7dc330dcb53d8641d74ebf9b32aa8b03b9d650bcb9258030d8b10dbd6", "rust-1.35.0-x86_64-pc-windows-msvc": "4f8935cea6b68c447b5fcb5974e0df3fefc77d15ab4f7d535779f06c3e4adc84", "rust-1.35.0-x86_64-unknown-freebsd": "a6a3c7983a880d8e9bf475735b725c47de68831abc22da980e44a3aca5c5bd89", "rust-1.35.0-x86_64-unknown-linux-gnu": "cf600e2273644d8629ed57559c70ca8db4023fd0156346facca9ab3ad3e8f86c", "rust-1.36.0-aarch64-unknown-linux-gnu": "db78c24d93756f9fe232f081dbc4a46d38f8eec98353a9e78b9b164f9628042d", 
"rust-1.36.0-x86_64-apple-darwin": "91f151ec7e24f5b0645948d439fc25172ec4012f0584dd16c3fb1acb709aa325", "rust-1.36.0-x86_64-pc-windows-msvc": "c7c9f7f996d195f464b84eaf0b6a068b41d1480e088b12e5134f85a5a144bd30", "rust-1.36.0-x86_64-unknown-freebsd": "eeeb1e9d0d7823c55f00f434789696e7249f465ba5966a5ab479040e3912c0e7", "rust-1.36.0-x86_64-unknown-linux-gnu": "15e592ec52f14a0586dcebc87a957e472c4544e07359314f6354e2b8bd284c55", "rust-1.37.0-aarch64-unknown-linux-gnu": "263ef98fa3a6b2911b56f89c06615cdebf6ef676eb9b2493ad1539602f79b6ba", "rust-1.37.0-x86_64-apple-darwin": "b2310c97ffb964f253c4088c8d29865f876a49da2a45305493af5b5c7a3ca73d", "rust-1.37.0-x86_64-pc-windows-msvc": "4e42652e7bf7ef13b7fdf8c64d0adf4e18c6a765e482e4c62a4dded36d4d08e1", "rust-1.37.0-x86_64-unknown-freebsd": "58a794fa9da9c14cefda55e7d4d13276517265a05a49f3a048033aee8870388f", "rust-1.37.0-x86_64-unknown-linux-gnu": "cb573229bfd32928177c3835fdeb62d52da64806b844bc1095c6225b0665a1cb", "rust-1.38.0-aarch64-unknown-linux-gnu": "06afd6d525326cea95c3aa658aaa8542eab26f44235565bb16913ac9d12b7bda", "rust-1.38.0-x86_64-apple-darwin": "bd301b78ddcd5d4553962b115e1dca5436dd3755ed323f86f4485769286a8a5a", "rust-1.38.0-x86_64-pc-windows-msvc": "99e2e22084a7c6a114f5353800677e1f7eb4b8cecf1b8841e21ac9579fe8da8c", "rust-1.38.0-x86_64-unknown-freebsd": "a765b1f01a387b15b576b67c77e02609a6d9a6769584742f66f0cac1944c0f7f", "rust-1.38.0-x86_64-unknown-linux-gnu": "adda26b3f0609dbfbdc2019da4a20101879b9db2134fae322a4e863a069ec221", "rust-1.39.0-aarch64-unknown-linux-gnu": "e27dc8112fe577012bd88f30e7c92dffd8c796478ce386c49465c03b6db8209f", "rust-1.39.0-x86_64-apple-darwin": "3736d49c5e9592844e1a5d5452883aeaf8f1e25d671c1bc8f01e81c1766603b5", "rust-1.39.0-x86_64-pc-windows-msvc": "3c96b221af3343c04bf81e621a0b97a2452ae1803ecc2841a162690d8ebfe46f", "rust-1.39.0-x86_64-unknown-freebsd": "9cb25742e727bab0da5feb957ef61f7ffc836b4d5d0e6cabfdf28fb68caf5fdd", "rust-1.39.0-x86_64-unknown-linux-gnu": 
"b10a73e5ba90034fe51f0f02cb78f297ed3880deb7d3738aa09dc5a4d9704a25", "rust-1.40.0-aarch64-unknown-linux-gnu": "639271f59766d291ebdade6050e7d05d61cb5c822a3ef9a1e2ab185fed68d729", "rust-1.40.0-x86_64-apple-darwin": "749ca5e0b94550369cc998416b8854c13157f5d11d35e9b3276064b6766bcb83", "rust-1.40.0-x86_64-pc-windows-msvc": "64d98af9b9114a3aaea096ba74c43cad75a2502fb682e941b4701f5d2a2b9272", "rust-1.40.0-x86_64-unknown-freebsd": "d1a58e9f743f4a55513f74e41c90ab7b291413ce46336c138762fd9aa6605b32", "rust-1.40.0-x86_64-unknown-linux-gnu": "fc91f8b4bd18314e83a617f2389189fc7959146b7177b773370d62592d4b07d0", "rust-1.41.0-aarch64-unknown-linux-gnu": "79ddfb5e2563d0ee09a567fbbe121a2aed3c3bc61255b2787f2dd42183a10f27", "rust-1.41.0-x86_64-apple-darwin": "b6504003ab70b11f278e0243a43ba9d6bf75e8ad6819b4058a2b6e3991cc8d7a", "rust-1.41.0-x86_64-pc-windows-msvc": "4c43a64e83c28bfb788782b01d95034ecc59bf9846006aa1deb6986c139b9f9d", "rust-1.41.0-x86_64-unknown-freebsd": "ae1093a1e476f5c7b1c1f59f986d64b5f82a76b865c9823bcc3d5061bb93ff9f", "rust-1.41.0-x86_64-unknown-linux-gnu": "343ba8ef7397eab7b3bb2382e5e4cb08835a87bff5c8074382c0b6930a41948b", "rust-1.42.0-aarch64-unknown-linux-gnu": "fdd39f856a062af265012861949ff6654e2b7103be034d046bec84ebe46e8d2d", "rust-1.42.0-x86_64-apple-darwin": "db1055c46e0d54b99da05e88c71fea21b3897e74a4f5ff9390e934f3f050c0a8", "rust-1.42.0-x86_64-pc-windows-msvc": "4a3131ff6d2b04d120069e0ba494a6418db1c691fc8e4627cf1aaf2ffbaf5ad9", "rust-1.42.0-x86_64-unknown-freebsd": "230bcf17e4383fba85d3c87fe25d17737459fe561a5f4668fe70dcac2da4e17c", "rust-1.42.0-x86_64-unknown-linux-gnu": "7d1e07ad9c8a33d8d039def7c0a131c5917aa3ea0af3d0cc399c6faf7b789052", "rust-1.43.0-aarch64-unknown-linux-gnu": "e5fa55f333c10cdae43d147438a80ffb435d6c7b9681cd2e2f0857c024556856", "rust-1.43.0-x86_64-apple-darwin": "504e8efb2cbb36f5a3db7bb36f339a1e5216082c910ad19039c370505cfbde99", "rust-1.43.0-x86_64-pc-windows-msvc": "78dea49969addb3ef7a3a3816482534828a5140c866a828be69ccfeb44972a3b", 
"rust-1.43.0-x86_64-unknown-freebsd": "2555aa83d1559af19054befdaea3ae560374376f9973aa3dad2c41fcd2eb84d4", "rust-1.43.0-x86_64-unknown-linux-gnu": "069f34fa5cef92551724c83c36360df1ac66fe3942bc1d0e4d341ce79611a029", "rust-1.44.0-aarch64-unknown-linux-gnu": "bcc916003cb9c7ff44f5f9af348020b422dbc5bd4fe49bdbda2de6ce0a1bb745", "rust-1.44.0-x86_64-apple-darwin": "f20388b80b2b0a8b122d89058f785a2cf3b14e93bcac53471d60fdb4106ffa35", "rust-1.44.0-x86_64-pc-windows-msvc": "127cf6569c4958e362f06f850eec6cba0ad69474ab15fef2dee740aee45a3169", "rust-1.44.0-x86_64-unknown-freebsd": "e2ad3224790d2283d7ef66d5e1f08cec688f1c29cf53326c9a6c28fb4914b6a1", "rust-1.44.0-x86_64-unknown-linux-gnu": "eaa34271b4ac4d2c281831117d4d335eed0b37fe7a34477d9855a6f1d930a624", "rust-1.45.0-aarch64-unknown-linux-gnu": "b727be0ecdee5fb88775b784758a09ab696293048a80288999b8a6f78b160212", "rust-1.45.0-x86_64-apple-darwin": "8e91f99ffbf5ae86d659d3515315a8e92ef44210102672c1536a9902cc182401", "rust-1.45.0-x86_64-pc-windows-msvc": "7d1118568b83fd1da5312de95ca6f30d4f21dae57073c00a216437e4c02733cc", "rust-1.45.0-x86_64-unknown-freebsd": "3d09db6a127558cfdb4fc44106e7d478bb8f6cc6148d536b90d30610181fc656", "rust-1.45.0-x86_64-unknown-linux-gnu": "c34ed8722759fd60c94dbc9069833da5b3b873dcd19afaa9b34c1ce2c2cfa229", "rust-1.46.0-aarch64-unknown-linux-gnu": "f0c6d630f3dedb3db69d69ed9f833aa6b472363096f5164f1068c7001ca42aeb", "rust-1.46.0-x86_64-apple-darwin": "82d61582a3772932432a99789c3b3bd4abe6baca339e355048ca9efb9ea5b4db", "rust-1.46.0-x86_64-pc-windows-msvc": "3545eb66ed7c6222ca4eb9e990d4bef63edbac9b580387bf7035501ee35d453f", "rust-1.46.0-x86_64-unknown-freebsd": "30d8b05073b23f0621ed00276208589dcd7669776b752a67c66c9c928ebbe258", "rust-1.46.0-x86_64-unknown-linux-gnu": "e3b98bc3440fe92817881933f9564389eccb396f5f431f33d48b979fa2fbdcf5", "rust-1.47.0-aarch64-unknown-linux-gnu": "753c905e89a714ab9bce6fe1397b721f29c0760c32f09d2f328af3d39919c8e6", "rust-1.47.0-x86_64-apple-darwin": 
"84e5be6c5c78734deba911dcf80316be1e4c7da2c59413124d039ad96620612f", "rust-1.47.0-x86_64-pc-windows-msvc": "c9f93f8c821090e1c96384bef564e9c9d86bd13ef8d1116b3f17e124f07f55cc", "rust-1.47.0-x86_64-unknown-freebsd": "650af0288d099c9debef7258a27caf15dd8aaf033ee1a099b4c5216c95ecfeaa", "rust-1.47.0-x86_64-unknown-linux-gnu": "d0e11e1756a072e8e246b05d54593402813d047d12e44df281fbabda91035d96", "rust-1.48.0-aarch64-unknown-linux-gnu": "c4769418d8d89f432e4a3a21ad60f99629e4b13bbfc29aef7d9d51c4e8ee8a8a", "rust-1.48.0-x86_64-apple-darwin": "20e727cad10f43e3abcedb2a80979ae26923038e0e8a855e8a783da255054113", "rust-1.48.0-x86_64-pc-windows-msvc": "0fdf41bb9b45e923000205b08329e15124f01b9b32986d73cd36625f3c7d883b", "rust-1.48.0-x86_64-unknown-freebsd": "21e24489ffaabe517e5e87572707784d5b471646164109b248957a2d32e7a8b9", "rust-1.48.0-x86_64-unknown-linux-gnu": "950420a35b2dd9091f1b93a9ccd5abc026ca7112e667f246b1deb79204e2038b", "rust-1.49.0-aarch64-apple-darwin": "ce7d689e6f73dd9c07b672ba23dabe5159fa8c194dce71b4f3f95baeaf564082", "rust-1.49.0-aarch64-unknown-linux-gnu": "b551bd482041307fa3373a687d6d6a2c4c0931c2e0a68b8b75dc80bc5cf5f002", "rust-1.49.0-x86_64-apple-darwin": "fe3e248bc4b0ee0a2595693687ad845c8a8bda824a56c9321520bcca02433716", "rust-1.49.0-x86_64-pc-windows-msvc": "5340831dcf98344de4a6888b50237f82568a97a46d9814f1400720dde0c7b6e5", "rust-1.49.0-x86_64-unknown-freebsd": "dced98577e834f511cae8e58290539ad6b8dd40ae512e90d1371f650961bd930", "rust-1.49.0-x86_64-unknown-linux-gnu": "8b14446df82f3707d69cf58fed92f18e0bff91621c62baf89288ef70e3e92981", "rust-std-1.26.0-aarch64-unknown-linux-gnu": "a583ddc2d4b5f9516bf136f781268ae0e813295d1d145fab4b46a4220f448923", "rust-std-1.26.0-wasm32-unknown-unknown": "0f8bb8bdb523cd05acd11006d47b14d7589e64fe25a43d1aec5df692988b400f", "rust-std-1.26.0-x86_64-apple-darwin": "cb5a0114e9e383aa93267868482db84f791124ee4faafdaed08ec6782d000fc2", "rust-std-1.26.0-x86_64-pc-windows-msvc": "88ae8697a84cfddc72429fb0880e6d8663d99ab98a69d27c06d21b4e668b13d9", 
"rust-std-1.26.0-x86_64-unknown-freebsd": "38cd138eba2ccaff59513d154fec580b6663ca6ef38cd620c348364aa1e11a40", "rust-std-1.26.0-x86_64-unknown-linux-gnu": "e27cb5c21541a500c8df919e15c8d3b002456ebbe573122e7b058cf5b4c3c13a", "rust-std-1.26.1-aarch64-unknown-linux-gnu": "34077f14d1e8c9ce96a9c72e95599326187bd460b88f877794a8c19f9e1b56b4", "rust-std-1.26.1-wasm32-unknown-unknown": "98af245301a921042997a433a618f58ae27b52340ad71c5502ecde7f29db79f9", "rust-std-1.26.1-x86_64-apple-darwin": "d43e06674e645e120af6716e6d0db5771fa8818b5a48fbee9791360086cdec4a", "rust-std-1.26.1-x86_64-pc-windows-msvc": "5223b7dde5b96d278072b4541fdffb7d33c64950af643eba385928763aca32bf", "rust-std-1.26.1-x86_64-unknown-freebsd": "1d63cc1f6dc6dfa2644619cd8c264c3d1be0fe5c44c5454e8ea04bd7beb036fb", "rust-std-1.26.1-x86_64-unknown-linux-gnu": "cc7cec9a121a97e8e23c350305a0e4cd4e3b475fd5a36fa6335a585d3c511f0d", "rust-std-1.26.2-aarch64-unknown-linux-gnu": "6f629b8c3ef8aa4a6c9439a5c1d8719905853f321a1080bb9f8a8356a1b06364", "rust-std-1.26.2-wasm32-unknown-unknown": "260e3267451c8098ac069376e2f4320e129ccec79602086a77f0798499cb5b3b", "rust-std-1.26.2-x86_64-apple-darwin": "712a79cd10b96c7119980e535a36595e03c69a360f1541f690c09de858d92723", "rust-std-1.26.2-x86_64-pc-windows-msvc": "41036c06e00ba038c5ec3940608370e93c6b9a731019d0349841fa78bc8ea125", "rust-std-1.26.2-x86_64-unknown-freebsd": "f54b58bf941d794ee10ab7ee9e1c94a70012073b0ee633ec2be585b1be2e31de", "rust-std-1.26.2-x86_64-unknown-linux-gnu": "91634f05bf2d0a20e627aed08a8450673acecb963869273221de17130540fb26", "rust-std-1.27.0-aarch64-unknown-linux-gnu": "a32ff8d2ab75a229b73076182978e8b97ac1c5447b9446b1d253685ef31652ec", "rust-std-1.27.0-wasm32-unknown-unknown": "aa1afca259ecbee3cf65368e8f9d5e9a0d8ea86be30edf4ecfedecc1db110380", "rust-std-1.27.0-x86_64-apple-darwin": "15ee6418f9b564618e9c81a6dcd7706a2f8ae5ca24fd1b6d7527c97563a47e57", "rust-std-1.27.0-x86_64-pc-windows-msvc": "77c9102d192ed2dda7128dea4e60992d1135c50b85f0ef8e989f0fda3ed3b73c", 
"rust-std-1.27.0-x86_64-unknown-freebsd": "6e307cc3798b50b37beb9ff43e88b12fb565ddaf051925fffa35bfbeb091d660", "rust-std-1.27.0-x86_64-unknown-linux-gnu": "b8cf36922315ca792929d515327c74b873358a64be4929b2ecfbe23af21e8043", "rust-std-1.27.1-aarch64-unknown-linux-gnu": "00a553c4b5869db1acc4f5fb1f6f954893db507ae01ed754bb8654f8916588e9", "rust-std-1.27.1-wasm32-unknown-unknown": "e16cfda8a8eb29c81d34ea3ca7b4c0815b46ddb85814cbf68320f2666ef44d78", "rust-std-1.27.1-x86_64-apple-darwin": "a521599355e564984e43a63042b1de93dd7cf96730930501f86611dd766384e8", "rust-std-1.27.1-x86_64-pc-windows-msvc": "4745f31711f18e06859946b932909a26d4593552c6631c5710e72d3da26f06ab", "rust-std-1.27.1-x86_64-unknown-freebsd": "12902b61a4897ade258217f045dfac3fe83d49dd52d1e2250bd94c3a10642b08", "rust-std-1.27.1-x86_64-unknown-linux-gnu": "9a1830b522117d68eeec703b50692093352212e035a46baceea666bb37739c2d", "rust-std-1.27.2-aarch64-unknown-linux-gnu": "39bafd1db4f1e881cdbd8d81b757bfef1cad6c06f6aa4514f8b693d997764e2a", "rust-std-1.27.2-wasm32-unknown-unknown": "59ad2323afe090c43e41dce482a4abed1473a7997db5db2ee236d49eac208b70", "rust-std-1.27.2-x86_64-apple-darwin": "eed3688d9f551066593b34f07e4d28846caa99624c2168387993acc6bddd003d", "rust-std-1.27.2-x86_64-pc-windows-msvc": "f5dbee42f3fde455d79e759a4854da78a650df3bcf27f194da78670feb11e10a", "rust-std-1.27.2-x86_64-unknown-freebsd": "6051f8bacbfbd2c3dceeddab8c66274bed7ef260cf346d367c53495cd1567572", "rust-std-1.27.2-x86_64-unknown-linux-gnu": "68984f2233853d3e9c7c56edd72a91b5822157f28fdb42023fb311af68f842dd", "rust-std-1.28.0-aarch64-unknown-linux-gnu": "9ba698f68c5643f53934e1085af40c79c6d1b3bfa01ca6dcdffdc5eec8f44cc0", "rust-std-1.28.0-wasm32-unknown-unknown": "33f9b2d3f568859db28ab32ec4dd388390d408f6204ab44886eec04cc08af843", "rust-std-1.28.0-x86_64-apple-darwin": "bd1b5110d35383349aafad904431d55656b13a3c02ed3b2020d2038557735ab9", "rust-std-1.28.0-x86_64-pc-windows-msvc": "876d68628e6e91113117516621ae4773cdbebdaab1e899d3ec83c612683947b8", 
"rust-std-1.28.0-x86_64-unknown-freebsd": "1fabaf71d21c1cdcddfb564950152ef862b519a175f7ee88d7e22bab31c4733e", "rust-std-1.28.0-x86_64-unknown-linux-gnu": "c5aed4c7ef362b5754526d26acaccdc9300942fd12e5cc67cc56fc89576a9dab", "rust-std-1.29.0-aarch64-unknown-linux-gnu": "72c0ab49bbdbf819da5018b620aeed22d34af558f4db9598059cb253fc6adec3", "rust-std-1.29.0-wasm32-unknown-unknown": "83449101356a3ae4abf8597913602b1c79dd76cc52bca7a6a3a9f4fdabc565d5", "rust-std-1.29.0-x86_64-apple-darwin": "7fca06854f7c63d1d0da7c46c816af5dd23eb8010603b8cf3f07a61b162f02ae", "rust-std-1.29.0-x86_64-pc-windows-msvc": "b05d04c684e070a820a0a3dc1128a24795895aecf25f6ffa0d68150e6209e424", "rust-std-1.29.0-x86_64-unknown-freebsd": "a59a50a60b033c00cf36c3b8039f300b2997245c21f2d02074f9d3157b54b353", "rust-std-1.29.0-x86_64-unknown-linux-gnu": "0bed2fcba596e1af6f56ed3f5d481b89b28a4ac26aea07128c6630c00c6a136b", "rust-std-1.29.1-aarch64-unknown-linux-gnu": "cf192e05192f79961b9f9e834e19c8b71654ac98b239408a6815d07ff2a96f19", "rust-std-1.29.1-wasm32-unknown-unknown": "48f31123614b5e0799200e0db640ff05c7236d0b6940bedf4043d5d19a2b22df", "rust-std-1.29.1-x86_64-apple-darwin": "9c31fba3bfb816cf6aa8d9d4c3e7f235233035ada95417e130de8487faa507d3", "rust-std-1.29.1-x86_64-pc-windows-msvc": "cb7825c2a1fa46696a429fc7e6afd3f2b396d1467a6e4b5f850ff8dedd73ac1b", "rust-std-1.29.1-x86_64-unknown-freebsd": "aad9e36766284656449dad75cc1c77c7b86da99abfb0ec424689101679aa8a43", "rust-std-1.29.1-x86_64-unknown-linux-gnu": "d05ddae0f05d721de00bf6e40f85f1ccdec902f864b9647e2e1cb08a8202d513", "rust-std-1.29.2-aarch64-unknown-linux-gnu": "f64b051f0b293ee66d7556231dcd70d143525bf6d0b2afc6fae945bf1ffd8073", "rust-std-1.29.2-wasm32-unknown-unknown": "e8317f0677a3d4ee3b4e5f2dffdf0cdb930c77da20676a32099fde477b439d5e", "rust-std-1.29.2-x86_64-apple-darwin": "72cd953cb8ea05667f5d58f5c4ba615a564611a86303c0f8f9235e7a53852692", "rust-std-1.29.2-x86_64-pc-windows-msvc": "5f2320e89946208b14a34d96a2bfc652bf1debe2bbf139fda19f7dc3a5f91694", 
"rust-std-1.29.2-x86_64-unknown-freebsd": "ddde8a33ddd902471c51f273087d90e9f7f184b7f09f5d14cab454c8c4965ec2", "rust-std-1.29.2-x86_64-unknown-linux-gnu": "1fe9a0f354256483a354ee1b51c60bf9f3f48868581f7cb36d0cc51a82400605", "rust-std-1.30.0-aarch64-unknown-linux-gnu": "0166650de5072545c3945416638dec9beec5ae1f3c72069e314b7c50e18b4819", "rust-std-1.30.0-wasm32-unknown-unknown": "e85afbc075e162e9af71795e1dc81fa0d2cf657dd10b74751f1769585321a20f", "rust-std-1.30.0-x86_64-apple-darwin": "33f4a7574c82db1b1bc3f829d0fecf9047bbac073c305500ada4aeaa08272ca9", "rust-std-1.30.0-x86_64-pc-windows-msvc": "7b493d21ac115dc4a1ef85cf0d8e73f688bda065c3abbdf68ff3674c122fb9e4", "rust-std-1.30.0-x86_64-unknown-freebsd": "4040fe677524e2ead69a2fcab4c16acaad3d4c4f1210ae36f400f82463bdfbc7", "rust-std-1.30.0-x86_64-unknown-linux-gnu": "8514eedc0ed99ab75c61be3137c3e57c4115063ddc07aec842f687ebfc7ceda3", "rust-std-1.30.1-aarch64-unknown-linux-gnu": "64410910d73628a77dfe94dbcd0cd49709b518b5f641fbe4a2476b9af097d47b", "rust-std-1.30.1-wasm32-unknown-unknown": "0892ab95cdfb0bee3c9981e4a5c69a88c0fc5fb7e0c206638291e91a4c794ee0", "rust-std-1.30.1-x86_64-apple-darwin": "a13d4a748914056f34c2e8691b4ca8ab6d16bb04e6e5fafc22ca594789f4e8b1", "rust-std-1.30.1-x86_64-pc-windows-msvc": "177d887593e29847a1bb7afeb7924c3958248a9ec8604e66671d8036e8fbf9b1", "rust-std-1.30.1-x86_64-unknown-freebsd": "66c91d14d8d3c1523f9b5c52b81e4293ba5378fcf8b3e5d0ed52e96afe6bdd31", "rust-std-1.30.1-x86_64-unknown-linux-gnu": "12c4b164efed44c28096fcd141225ee9bf74e7e3395bc6a60c11c9115a0536c6", "rust-std-1.31.0-aarch64-unknown-linux-gnu": "02e5b48d8fff293a95b591646e707a8c61399ab6c244508ed842f3d736ded641", "rust-std-1.31.0-wasm32-unknown-unknown": "ff284b10844cdddca786d85fc3be48796f7286a14350e807fa9912e7748634f0", "rust-std-1.31.0-x86_64-apple-darwin": "7dd4bea941cde8a5ece3286ed43733503c092a8edb50c8c31223a738a526c246", "rust-std-1.31.0-x86_64-pc-windows-msvc": "625e1dbb5996cb9845cb6c779e4a6353faa1e05535471fc00aff6a6f84efeab5", 
"rust-std-1.31.0-x86_64-unknown-freebsd": "3779f0732ee8fdc1d81663172a72219d59b716e8cc5a6b07bf1d5dd744f74b13", "rust-std-1.31.0-x86_64-unknown-linux-gnu": "fe67a62c7a63acbf2458a36d7689ef41903187a472f0c28850f1fca7ea478da8", "rust-std-1.31.1-aarch64-unknown-linux-gnu": "cc32d23cc2995c4838ab2ed4e709ca9748f13f912e9fbbb7cc78c41dbc4de268", "rust-std-1.31.1-wasm32-unknown-unknown": "a9b1774a6aed9387b12244d2ac0ea047506ffffee67cd834148f01c66ed24e98", "rust-std-1.31.1-x86_64-apple-darwin": "91c3b12614f9795ef2e0092010f247a38d09c95d4089f75b44fad14679bd1cfb", "rust-std-1.31.1-x86_64-pc-windows-msvc": "e84c961261fe70da68dc56effbb277eadeac51fb5bdd2287a168cbe2ba2b1a2e", "rust-std-1.31.1-x86_64-unknown-freebsd": "89e551403f70eed976ac1dd91c3effc9434ef450da4c347d24a141529f83a101", "rust-std-1.31.1-x86_64-unknown-linux-gnu": "699664b3a64959a2d75e486e19e7cc9934cbcbf2c57a977dd2a2b33cff367da1", "rust-std-1.32.0-aarch64-unknown-linux-gnu": "346efe3aef2aff7b71a611bf7661bcec5f9bc4025a599c2866ec5fd330247cb9", "rust-std-1.32.0-wasm32-unknown-unknown": "5da2824a9404204ce2a72b44961e2dd8854fe2232f65851c1a8ff5c59ef537d5", "rust-std-1.32.0-x86_64-apple-darwin": "b736d035a97f830585360e54e3f8877b68c942211cf0a75e805f34bfb36103a6", "rust-std-1.32.0-x86_64-pc-windows-msvc": "cd9693213bcc2ca0ff1490861d3b52703b65df6f678c3f2ae9ad3f3717e08871", "rust-std-1.32.0-x86_64-unknown-freebsd": "d50f674379791a93764d383153ed6533cea165ede7f233df4e17563bfdab273c", "rust-std-1.32.0-x86_64-unknown-linux-gnu": "9f2705a3ed3217c13fd55569406c52f590030752f57520312e135223ae930caf", "rust-std-1.33.0-aarch64-unknown-linux-gnu": "26f13cd80c95d484ccffecf517f1e05ce521072a00f1adea43d02b3f9d37f82a", "rust-std-1.33.0-wasm32-unknown-unknown": "ea1662a05f89f9fb725cba851f6636316cd80052fed610e4912432e4ee523db1", "rust-std-1.33.0-x86_64-apple-darwin": "94247d4d11c631c9d4256f4b0aedd7fd0379fdb55174405c4c1c0dd0c40097ca", "rust-std-1.33.0-x86_64-pc-windows-msvc": "36d94915b8aa9d3207d31ce77bbb790685cb8263920f0873875ae433fcb8709a", 
"rust-std-1.33.0-x86_64-unknown-freebsd": "8eec7a21a3368890fdf0b826e7bc1928775724c0a4bd14d86304cc7e48309237", "rust-std-1.33.0-x86_64-unknown-linux-gnu": "661c2ba717ae1502f002b4c6e7aeb8941685c7ea8fe7ac26ed9ede26f615b7af", "rust-std-1.34.0-aarch64-unknown-linux-gnu": "d175e91206aba9e2056a9c5af50f700502e70b2f8eb609854d660ac2f3bf1fff", "rust-std-1.34.0-wasm32-unknown-unknown": "4add0e23d048309cd284096c36342a6c2307a293072ced9fceeb6a2a48f3797f", "rust-std-1.34.0-x86_64-apple-darwin": "f5a4fa8e86e1d4483bbe80d0adb08a7f5e466d8173bb5ea596ee698c75d0fd19", "rust-std-1.34.0-x86_64-pc-windows-msvc": "3037d196ac175595de3ddb3c8d26e9795e1765bb083a33da30d2b6afb5b03e17", "rust-std-1.34.0-x86_64-unknown-freebsd": "c012bcf9ee417308fb53b97e58d753f90699bd516bcafd6cc83d6f0a54423f3e", "rust-std-1.34.0-x86_64-unknown-linux-gnu": "6565dbe18ee9fa972058b17744ec1129c4fcbf797443f2e16b999df3870d6281", "rust-std-1.35.0-aarch64-unknown-linux-gnu": "eee3c6a5c7ef5bc21b626ce350b0e1b02310e0463b6686683262f3fef400746d", "rust-std-1.35.0-wasm32-unknown-unknown": "14f1640b35fe351dccd54fc459dea6b7ea199324a723e6d3efc42d519adca99b", "rust-std-1.35.0-x86_64-apple-darwin": "93a640d065d761b85b0f770dfa865b2f86a671a7fac0d5079e4cdc9e4e031011", "rust-std-1.35.0-x86_64-pc-windows-msvc": "2d9091b4a78d7f86b9db5d086b0eebbc2afad4bf828cfbc4b2cc44af86f52210", "rust-std-1.35.0-x86_64-unknown-freebsd": "22e8a2deb83dac920237f810b612b7ea555b03f5830f413a94d007ec683de519", "rust-std-1.35.0-x86_64-unknown-linux-gnu": "5dfa92661ff1a22680785bd6999b6117ae66841e2bd9e5318eb97002956131e4", "rust-std-1.36.0-aarch64-unknown-linux-gnu": "22bfc32b5003c3d5259babb202f3f66be16fa6f3c75c20f429a16d7ef5eb1928", "rust-std-1.36.0-wasm32-unknown-unknown": "7fc1d9f19f6674f73fb89c24aeb741adc59896da6d7ce2e16317aa1fb084bea4", "rust-std-1.36.0-wasm32-wasi": "382dd29fa294ef53272984b9121e07d2b50cc131c561bb7ab72bdebda3abc031", "rust-std-1.36.0-x86_64-apple-darwin": "7c6806809e010e5fba1780007ecff5c31f0ad2fcac1b414b98ca3baa0fb41b36", 
"rust-std-1.36.0-x86_64-pc-windows-msvc": "2ef035a156b7f20a06677f3873631833afdf9cf755af3fc9466c02d9725755eb", "rust-std-1.36.0-x86_64-unknown-freebsd": "a2a923cbfa3481af66c22673cac38e7cb70e26333318ad59c27b8b6ac16a84fe", "rust-std-1.36.0-x86_64-unknown-linux-gnu": "f92425592c02d4681a5c5ae43ac3ad7ddcc218da50fc651ddc5c2240843a7f31", "rust-std-1.37.0-aarch64-unknown-linux-gnu": "60d64dde9178fdb698b44315b182375916116e30f5fe7f0d8278dd62eb15e7b3", "rust-std-1.37.0-wasm32-unknown-unknown": "b55f82540aa900d2d1d1f6879c9374a8efc78d9eeb20af181ee30182b7f9688c", "rust-std-1.37.0-wasm32-wasi": "551ee5f9adbf24c637e914148c0f161e9e2175aa7d39e5b486d1dd817fb47dec", "rust-std-1.37.0-x86_64-apple-darwin": "0b3fe2575b55a739f409a9d76d05c4bb32494691bde5043d77ba4d39ac182f20", "rust-std-1.37.0-x86_64-pc-windows-msvc": "e03f363296cd60e93110db517f3804631e49fd91de7c0d77b229e31b1135dff2", "rust-std-1.37.0-x86_64-unknown-freebsd": "8783a667ea9c46f27027d494098c51563faa734c5ddb23c6b9b3eda804eb9742", "rust-std-1.37.0-x86_64-unknown-linux-gnu": "09a531a97a16701eb794ecbeeded5d8f8da33da7f1bd372661ad385e3f31c048", "rust-std-1.38.0-aarch64-unknown-linux-gnu": "0725ae9f55639c648fdaba06129de395ed839a7d1aab6aebfd21f26cbe1ce7ca", "rust-std-1.38.0-wasm32-unknown-unknown": "9634130c797e8c1fd1d7bbdfd48a32e85e2dd3512ffb2b51974374308cf581cf", "rust-std-1.38.0-wasm32-wasi": "becb178cecc2d2137e006c24e6988d79390f96dcd65cc2e8b2f475a8fdab4bfc", "rust-std-1.38.0-x86_64-apple-darwin": "b1a986e8676aaed25959e9f6dd7c8c5aa67fb829d0d694edea34d8169658a125", "rust-std-1.38.0-x86_64-pc-windows-msvc": "3f5b3c9a4f9015c9e1e12eed94752129d80448ea53f9d5ec1e332c2ffa2c4807", "rust-std-1.38.0-x86_64-unknown-freebsd": "9f1d88449ef56c31ebc514873ba4d5889fa12697c4c2ea1071f15127f301ac4d", "rust-std-1.38.0-x86_64-unknown-linux-gnu": "cd50ec3384d79aae89ffdacf09715b68b1b5562657e993f26f67b9458e92dfdd", "rust-std-1.39.0-aarch64-unknown-linux-gnu": "adbecacf6cf0ed19df2496cc648b16192c0bd085d7e6f670edcea4dd28ab37df", 
"rust-std-1.39.0-wasm32-unknown-unknown": "654905b39eae031282a9db9bfa47504c23aa4bbc7d22b769b9bd2f6ca8b61cee", "rust-std-1.39.0-wasm32-wasi": "e7f008fd1f7c902f5ba7777d8a4346783392bd40813c79381bd7497fbcf19be0", "rust-std-1.39.0-x86_64-apple-darwin": "ebd058b16590e2c1a73f5de59d169c8c11be6014934cb083afc84accdccd40d9", "rust-std-1.39.0-x86_64-pc-windows-msvc": "cc704f4c26d5e215a8d98d0797a766fad959101776db69bb392317becd7472ea", "rust-std-1.39.0-x86_64-unknown-freebsd": "94a71addd6983ae844be1cd403926c947766b72f032a083fd1be73f18cf329d9", "rust-std-1.39.0-x86_64-unknown-linux-gnu": "2ddad802f048acaa5cd48f1105c18c7f4de32dc9569ac4d64bfcbb3d8c155cb7", "rust-std-1.40.0-aarch64-unknown-linux-gnu": "e1a1bc577d51556c53e39d4f11fb4918f0ebf27e166ff63321b2991754706d16", "rust-std-1.40.0-wasm32-unknown-unknown": "e3f68aa04c97fb8f5f595d47f417221afb4b0c49d177a2cde7935e3afdd45947", "rust-std-1.40.0-wasm32-wasi": "814d780d7296cc8a8969536f99e8b591fc68d9290e399f01c59cf86d32303718", "rust-std-1.40.0-x86_64-apple-darwin": "1eff41b353403cc284a09debb00cfd41d663447eabf5ad2d4cf736c8c8db0458", "rust-std-1.40.0-x86_64-pc-windows-msvc": "10685476cf7d68e56564730a7d553bacd924717b9272875219da7b9f5ad6704d", "rust-std-1.40.0-x86_64-unknown-freebsd": "90a41f80e2501ac2b036b7cdf269db19a5204aeec257bd585074508f1a6ba2c9", "rust-std-1.40.0-x86_64-unknown-linux-gnu": "735affaca1370699f9bc3fd7b1320694afd250923d283d88c842b7913a97d083", "rust-std-1.41.0-aarch64-unknown-linux-gnu": "59b8dab431af29dcd28c6e92e82a488ebb20dbb5dff93ca14119ba8e2fabd9c8", "rust-std-1.41.0-wasm32-unknown-unknown": "0974d40a9f54bd9dda88c20ffa1778fa90ee77a549a8f30ed13477b55e142a63", "rust-std-1.41.0-wasm32-wasi": "e50c63deae8a8bc81d438f73bc885e5de7fa282784171b53e3eebf8f41d8f7d1", "rust-std-1.41.0-x86_64-apple-darwin": "c917af985d879376d8906e7c81ceacb06e65ea7b229ccf81505f8bd6cf5abf64", "rust-std-1.41.0-x86_64-pc-windows-msvc": "aee5b98f0ac533471dc3d9ffdf6fcb22565c44d80c03c1c4df0c8b714931d1a9", "rust-std-1.41.0-x86_64-unknown-freebsd": 
"4436e80598592398724daf0efc33b2a6505bebde59c021d3e894d605ae5255dd", "rust-std-1.41.0-x86_64-unknown-linux-gnu": "b563fc979eea8372f5b371e10f0857e79cdffc34b124c7a7b0d89014d1b351b7", "rust-std-1.42.0-aarch64-unknown-linux-gnu": "1343f51fc87049327233cee8941629c3d7dfdc425d359385f93665de3d46711b", "rust-std-1.42.0-wasm32-unknown-unknown": "695439ef4099f2a1da7c9932e556b3985f4ede5b27e6ef260d670bfe4bc3894b", "rust-std-1.42.0-wasm32-wasi": "077bb250b6df47f1350ea875645fd388d3e6df69830ab49627fe6f6bea5887ad", "rust-std-1.42.0-x86_64-apple-darwin": "1d61e9ed5d29e1bb4c18e13d551c6d856c73fb8b410053245dc6e0d3b3a0e92c", "rust-std-1.42.0-x86_64-pc-windows-msvc": "192d8e1277280df261bc917d1dcc8225b5fb507f281d05bbcf85f859679e1429", "rust-std-1.42.0-x86_64-unknown-freebsd": "76e0f0f7275e114908b0ce2bf39813eaa580af92cc1fab31496ca37ba9d5703e", "rust-std-1.42.0-x86_64-unknown-linux-gnu": "e6bf5495a8b1cfb849fce2753404b3b7ce7fba0c5d743d940fac3ee4558fda26", "rust-std-1.43.0-aarch64-unknown-linux-gnu": "f4b80b12ecf14e97937cd24573e82f306f147db6266dc5a2cb27aaeaf49398a7", "rust-std-1.43.0-wasm32-unknown-unknown": "efe2061e7b9711f51b560c7770ebe372003beb9beddb363f27c3960ee12135cd", "rust-std-1.43.0-wasm32-wasi": "6ece090d05853a54bb7f6e4985840cf01dc4857eda0f375bc8e35846d1d533e9", "rust-std-1.43.0-x86_64-apple-darwin": "c75d37579b9e143ebd98ae2fe42c818fd47e0a2763b2a9bdd7e6b9954509d735", "rust-std-1.43.0-x86_64-pc-windows-msvc": "008ca995f429410248558cbfb0e77ebd062ca709a9e3a7d58d9f81c491395280", "rust-std-1.43.0-x86_64-unknown-freebsd": "3c9b450b826874be5c3f35f7cb923f02d4769b81f763fef21c9c0d3a80532c2c", "rust-std-1.43.0-x86_64-unknown-linux-gnu": "84fd8ddaaa217b82c563d4a32a690da2c399388258a3d2baf180992c21938af5", "rust-std-1.44.0-aarch64-unknown-linux-gnu": "fafb49cc7264a8621c17e8954ec2e0a78e097395b285edb5c1639c61ffb8142c", "rust-std-1.44.0-wasm32-unknown-unknown": "8e12796a0c2fb083953042218f832bdeb78da1bfaf67b9dfe3d719920084d755", "rust-std-1.44.0-wasm32-wasi": 
"ac0ffeb48bd4be6dd460c5665fc52bb4da2be15e5ecdefa4bf73c6db7392759a", "rust-std-1.44.0-x86_64-apple-darwin": "af58f742764949765e09bb60bd1c16025a79a1be8152996fd5b3a44e5df90311", "rust-std-1.44.0-x86_64-pc-windows-msvc": "1f52376c9a48ce76b24c7aad7a9817b0b4b2cf11a8581001c8d76285d9593340", "rust-std-1.44.0-x86_64-unknown-freebsd": "a0315d028e72e221291dba257e8212e564574d87362cb07e06dc15950d1e6788", "rust-std-1.44.0-x86_64-unknown-linux-gnu": "3b7a4eede0ca550c256ca6721877de0154c27e71196d8b9a980a480682ead0aa", "rust-std-1.45.0-aarch64-unknown-linux-gnu": "816f6cc132db84617bfde6ad47336bfb020552a45bd0a10250c4e420d512d5ad", "rust-std-1.45.0-wasm32-unknown-unknown": "1b4f40be1d0f18a5a04f9f706fef74db0e299046557a706a4dc31a2b36d8de21", "rust-std-1.45.0-wasm32-wasi": "1ef0e8e09ad39275a188bc88d4969c4d1e150cd728d9ff5955b42d6a643ac10c", "rust-std-1.45.0-x86_64-apple-darwin": "e3ac5a3efc106ea13687aa1231609a5d61b1874f4b3a2f68b0e0ad70c89a2364", "rust-std-1.45.0-x86_64-pc-windows-msvc": "f638c04f6709382ded2e78aebff03ae5e40e074d003786f083e6e3ccc438e0b5", "rust-std-1.45.0-x86_64-unknown-freebsd": "68b28ad5488bfb051589c7079bdfa396aa42c29d463a5622fb5eb9d6ecc4a8e6", "rust-std-1.45.0-x86_64-unknown-linux-gnu": "7ab1dbcdeab16dfea1ed024675e60429db9719f03648e6a09662de72b4ff730f", "rust-std-1.46.0-aarch64-unknown-linux-gnu": "eaa7cfd73e96b6ce03498398f4bd9ded73870fe3c5db980038a4863c37157597", "rust-std-1.46.0-wasm32-unknown-unknown": "0ef3344aff8ae3f2065ed8f15daa73514a26f934e160cb6974d43a8231fcc090", "rust-std-1.46.0-wasm32-wasi": "44a37dfe4398e1c120a199b2ebbe86838171c38a29a0f76e10ede00bf1aeb16f", "rust-std-1.46.0-x86_64-apple-darwin": "8c897982bc38c9528b448fe551f089fee7716e692dece98052f4459ccc6e591c", "rust-std-1.46.0-x86_64-pc-windows-msvc": "06d92b12e2f4e6024971e99a7716423d4738c3e379fc82aa54de2a812de268b2", "rust-std-1.46.0-x86_64-unknown-freebsd": "e37c06bbe2bf2501675101787388ab87d510ef80f2e091be3f50fc5d019add1e", "rust-std-1.46.0-x86_64-unknown-linux-gnu": 
"ac04aef80423f612c0079829b504902de27a6997214eb58ab0765d02f7ec1dbc", "rust-std-1.47.0-aarch64-unknown-linux-gnu": "0019c302a0a02d8a9e40c3bcdd5a31b9b2704161563d72df3572521989182b0c", "rust-std-1.47.0-wasm32-unknown-unknown": "b0d19ceb2b56105ee3407bdecaa779747abb1574990632e53a2aba681e964187", "rust-std-1.47.0-wasm32-wasi": "0eab479faac83b9352af04ba4dea376fdeade3101f5e912f40ee3c93e32d1317", "rust-std-1.47.0-x86_64-apple-darwin": "6b86bcdad5a6eff87a67b6387051d7f10a48e088b8f92d76869d201500b9ce13", "rust-std-1.47.0-x86_64-pc-windows-msvc": "896614728a21128c335f632f2f45217320974f71cb4c7c23184610f0b587b7b5", "rust-std-1.47.0-x86_64-unknown-freebsd": "80f5dee782bd74b41c55a676c624ce2260ab54c834102c90ea54e0c5e7e513c6", "rust-std-1.47.0-x86_64-unknown-linux-gnu": "17ecad27d96b331608e4a96dfa3cad05ccb2ccecb888894ed35054e0d1f5207f", "rust-std-1.48.0-aarch64-unknown-linux-gnu": "3b0e5c4d03ddb97cd462947c539005427813f5ba91be81888db77e7d4bf36e45", "rust-std-1.48.0-wasm32-unknown-unknown": "6f981b353e096b8a54c86e6812c82db3b5fd45335b575396e3bfc29b03ffe959", "rust-std-1.48.0-wasm32-wasi": "ed57645e5fe429ef99018759e1a89e090220a3197f30ea544070610ef73c19aa", "rust-std-1.48.0-x86_64-apple-darwin": "430d0ca7c04b0e1140f39f2274e0072a3ba2373a99a230d14ab16361e19b6129", "rust-std-1.48.0-x86_64-pc-windows-msvc": "a526c6f6c00d6a0cd4b6e3348e6329d204099983672862249593ba932b5ddf28", "rust-std-1.48.0-x86_64-unknown-freebsd": "a5ea4ec9664f38a2464216031eeea01f723b4e0691f7d473d8f7ab663551f979", "rust-std-1.48.0-x86_64-unknown-linux-gnu": "2e7152e5d24cea7e44e6645ebbc0387cbe1c7059b54d95d8ea3afe298ac8b2fc", "rust-std-1.49.0-aarch64-apple-darwin": "cf3308806fc3b6fe00ce49f1e63b1cb1d1443cc812eff7947257f31f590465d3", "rust-std-1.49.0-aarch64-unknown-linux-gnu": "c58bd4f0738ff662f70e35c19bfa6b8eb12ad54b0fbdce32ee3e50186c04a969", "rust-std-1.49.0-wasm32-unknown-unknown": "803b4bd43c711753e3e73c210b88a30c4cfe6f3955902d76e2a15a70ad191ffd", "rust-std-1.49.0-wasm32-wasi": 
"0c97a1f8470719b741186cbb89c4be6a61057fa013d815b2b97fc1043e269d22", "rust-std-1.49.0-x86_64-apple-darwin": "c4389a8534b8da3ae3570646d68fea9a25268b17ed138867e31d4517312759af", "rust-std-1.49.0-x86_64-pc-windows-msvc": "bb55ad626b9d304c0e080fc8731c7978a937c98e873a84834925c525acdbb5e3", "rust-std-1.49.0-x86_64-unknown-freebsd": "ba97f1d751d6656d5efba4b0278a6571e6a56a489670f279bd2c647a90f1679c", "rust-std-1.49.0-x86_64-unknown-linux-gnu": "f0d2c2d509c29ea9f7c24bb5a885321030281631e0bde0714e5cf881184d57e2", "rustc-1.26.0-aarch64-unknown-linux-gnu": "ddddaddb585b95d81854171ac4e02d07790505853cee3034f199c8b7897f32e2", "rustc-1.26.0-x86_64-apple-darwin": "5cb67314656d16cf2a1bdc84213aaaf6afdb5811825c7afba916e2d42d3d641f", "rustc-1.26.0-x86_64-pc-windows-msvc": "427ae4a43a901be288ff3a4dc85d3a14f7e95108cfdaae63e8dbb4a227e07cdd", "rustc-1.26.0-x86_64-unknown-freebsd": "9499ce5b68d631f8345c387e1f59b21892d97e0acb5650deb61a34719310bd38", "rustc-1.26.0-x86_64-unknown-linux-gnu": "7ca9a30010602aaf2244c376a3cc5baa89429d54da17b8ba1cb0cdfdc846cc61", "rustc-1.26.1-aarch64-unknown-linux-gnu": "7a06bd5312cbe8bb19e526b4c9ab04de1628019815a566ce0ff9401515bc2c04", "rustc-1.26.1-x86_64-apple-darwin": "e5f4291c3709b170fbeb17fab7fae50fe0c626dbdc5c42ddb1f342ea03acbad4", "rustc-1.26.1-x86_64-pc-windows-msvc": "e84dca395837aa24b4ea87d46d06a333c2e87d0be5fc5259476a95fbcb05accc", "rustc-1.26.1-x86_64-unknown-freebsd": "dc3dc36010d73349152e6158522e82830fda173007b9299b0a947c90769c54ff", "rustc-1.26.1-x86_64-unknown-linux-gnu": "45bc1c30e0c473c42889f22b182ec6f0b0fc3be0825e1607c64933592486eb2a", "rustc-1.26.2-aarch64-unknown-linux-gnu": "b09fea72e259811fcbc6aade942329bc4588356470765987ee37d6108a82f7b6", "rustc-1.26.2-x86_64-apple-darwin": "5b0a3d94a4fa76ed28859123e35c09a91d7eb8ff65f40ec4c50dfa56ffed8ae5", "rustc-1.26.2-x86_64-pc-windows-msvc": "15eb657747a86a4481501bb21e2dbcf56a06c0beea00e8677c86ef74b8812576", "rustc-1.26.2-x86_64-unknown-freebsd": 
"48f20a8dc6bc54c90aae685d0c3fa2caf3677f1c4a4d0c53aee9d15588bd0735", "rustc-1.26.2-x86_64-unknown-linux-gnu": "1ebdafe52b581a63cea217a036fd6e77706d2715ae9cfe10a8c715d753326004", "rustc-1.27.0-aarch64-unknown-linux-gnu": "b58c0373df43623adcc990d36190ee157f46f6fba650d0242632f3df2dfbc425", "rustc-1.27.0-x86_64-apple-darwin": "0b00c6971ef524f68b911f621d199e60c339c390b18e12700d55e012b62aa90c", "rustc-1.27.0-x86_64-pc-windows-msvc": "22eeac4f4b4d91c28cf18c6a4a8b477091e6661e3e827c0b32355d52e634a517", "rustc-1.27.0-x86_64-unknown-freebsd": "24c193213450ffacffebdd1413d77fc3c1ed00049cf1ede2d0f3f370dd86b462", "rustc-1.27.0-x86_64-unknown-linux-gnu": "29f399a1a208ea3f27f21e57f2d832e9d801c397a986aaea17e3a2ddeded6c3c", "rustc-1.27.1-aarch64-unknown-linux-gnu": "c48d19ff5474ce75ebbb97e1b26ca8dc23d38f635ae7a3e21b8a4139df5cfb8e", "rustc-1.27.1-x86_64-apple-darwin": "747f616e07e5da9323a21c1cf9d76b53bb46094a68223d461a7333f26c714f19", "rustc-1.27.1-x86_64-pc-windows-msvc": "76abfd523f876516e589f62a83eaaa6e55496745e32f2e9f3f87aca55da3e8b8", "rustc-1.27.1-x86_64-unknown-freebsd": "9b199c21094f996fd9d4b620a5ff2c4bc5b8dab13e96bdf7c113291f601ec944", "rustc-1.27.1-x86_64-unknown-linux-gnu": "a6bf6205b345b854d705d0028a4e7161a0f5b209e464130e7d135fa01a296dc1", "rustc-1.27.2-aarch64-unknown-linux-gnu": "c1a5ddc6e40be5eef7afad8c126c6f426d07eb1a297902c7ef871279fdbeea49", "rustc-1.27.2-x86_64-apple-darwin": "b5c5edd2094afd0a92ad776dbd12cb6ee37800b940437dece10229ccacd1f561", "rustc-1.27.2-x86_64-pc-windows-msvc": "c00dde7df7475340f5574b09c86d0e19f6707f838bf95d2ff463a8f4d4d76d33", "rustc-1.27.2-x86_64-unknown-freebsd": "66d739632574fa52e82b40aca0eb4cef7a38047ed67cd6a240d8798a3cf9b6a6", "rustc-1.27.2-x86_64-unknown-linux-gnu": "ec3efc17ddbe6625840957049e15ebae960f447c8e8feb7da40c28dd6adf655f", "rustc-1.28.0-aarch64-unknown-linux-gnu": "09d1fa08d7403495ca07565eaabfcbe6703e842b765a68d5110cf4e64e988476", "rustc-1.28.0-x86_64-apple-darwin": 
"10a5bf35177508c72050149663ff679a770eafa8557c6be0052603ca1267ae4d", "rustc-1.28.0-x86_64-pc-windows-msvc": "39871017768fe779dbffaaff8696baf0788bb9c4d6c4caa3d2564e1153ab2199", "rustc-1.28.0-x86_64-unknown-freebsd": "5eeaa17844f87e59aab821dc98dd15a920df0d1d7da3ef5808d2c586331c92a7", "rustc-1.28.0-x86_64-unknown-linux-gnu": "008bb3d714544bc991594b29a98a154441914c4771007130361bbadfb54143d0", "rustc-1.29.0-aarch64-unknown-linux-gnu": "c7480c0b98ae84151ffa8cadcb06d1ed2a11a755b6619ac1b89e7c886e98b7ff", "rustc-1.29.0-x86_64-apple-darwin": "3462ba7e841485f93251762ce0b36a3922830a1249e5d79d6d010ceb43e4ee3f", "rustc-1.29.0-x86_64-pc-windows-msvc": "b27c38cb60092e9cac8afc4ad760349821e6b068d986e13ad46233b9676ab35e", "rustc-1.29.0-x86_64-unknown-freebsd": "38f30c96f0fa7ebfe94cd2db57e9b99961feca0a09045dbc1e955404b5d7f40a", "rustc-1.29.0-x86_64-unknown-linux-gnu": "229c51d51efc239e6eb9b428795bb7f57309f11287705dcba4877d5e220102a0", "rustc-1.29.1-aarch64-unknown-linux-gnu": "784ea61ff852225be622141600c79621456f1ad9f9becdf7070eb0217b8635aa", "rustc-1.29.1-x86_64-apple-darwin": "64b86c923786dfafe8bbb5fcbef0d854132f29f0bf635830cd2d95ff225d2317", "rustc-1.29.1-x86_64-pc-windows-msvc": "2675bf444df8fe900b84098917db3e765c87ad3c812ef2a818c7e622d77db457", "rustc-1.29.1-x86_64-unknown-freebsd": "ed9b2ccbfc6028ce2c73105cebebdb9f2e2332018c687951639176358bfed9a2", "rustc-1.29.1-x86_64-unknown-linux-gnu": "b99324394ba20bd12efa9d30dad72b10747bd075f97c7a9fd0ce3f9394383fa7", "rustc-1.29.2-aarch64-unknown-linux-gnu": "54a8c54f04dec72d7f8655ce1c3037dc23ded2f9ada26e7ea77aa45fc8b0d0c5", "rustc-1.29.2-x86_64-apple-darwin": "d9c0dd8127ed632e27d751f051bca933578317ffe891e39155ae721bc1d3ec05", "rustc-1.29.2-x86_64-pc-windows-msvc": "53dcf97ed9461784d713c5a413df7e8e5aa4c9158a4d5921a038b77b17120a17", "rustc-1.29.2-x86_64-unknown-freebsd": "94fba7a7b88ca86c037a48376b7e09bb4ca66e1268fc8d664796cdbdee97c0fa", "rustc-1.29.2-x86_64-unknown-linux-gnu": 
"b04146b09edc4bad0de7c8fa1a5a2aa4416d365c03c5962b8a5b26c7047b7cc9", "rustc-1.30.0-aarch64-unknown-linux-gnu": "ccff6c6d8386655955265f586862314dd3b646bbeccd1369877f4343b1960a53", "rustc-1.30.0-x86_64-apple-darwin": "d4fcbc61c7323e6fa1001ae268c5db1693ff07e5ef1ac25907138a2ee7bd8faf", "rustc-1.30.0-x86_64-pc-windows-msvc": "2d2d1a51bb15794920a2f0cccf7fd2c8bfb037d00975e799ff4a4ac3b83032ce", "rustc-1.30.0-x86_64-unknown-freebsd": "68a74949e34118406673cf8cc0098b011907c840890e0640aa3b145ce91c521d", "rustc-1.30.0-x86_64-unknown-linux-gnu": "cc45058e9963d33ca28220e752d9e360b7e05f17e34284f5f8197738c3a88444", "rustc-1.30.1-aarch64-unknown-linux-gnu": "f3569c0a74f07aa2e56bf93c9f2aaddf7434ce17f85d6d6ff854fb9245888bcf", "rustc-1.30.1-x86_64-apple-darwin": "fd8ca09595e9d686aef9e3b94259500b482cf7a01de167a8c72a4f8d19a604f3", "rustc-1.30.1-x86_64-pc-windows-msvc": "8ad1551132de8c766d2d7c66d9bb93a959ebbfa7d86c47f196227fea914583dd", "rustc-1.30.1-x86_64-unknown-freebsd": "2f79e386bed201eb9b6ffa58240742617ec6006accb559dab7b6424f33b65b5f", "rustc-1.30.1-x86_64-unknown-linux-gnu": "d84de208499b59e4a3c074f9f3f2fcbb26fb20d6bfd19262e6d5f4181ddbe34d", "rustc-1.31.0-aarch64-unknown-linux-gnu": "1e480d8cadceff39ad39d30fe874bfd485386c98842f16423310cb2ada1923c0", "rustc-1.31.0-x86_64-apple-darwin": "250fd3f3aba7d38c4af9682a12a37c733dbd6dde127665b0f493551e6c4aea8b", "rustc-1.31.0-x86_64-pc-windows-msvc": "418abc285870ab4d85d53769eac229cd66b7fc7cdaa6e73699530e88ee5dfaf4", "rustc-1.31.0-x86_64-unknown-freebsd": "9ec40454e22e3494b9859c03e37e8851077f897845bcf838d69d4393900e7b33", "rustc-1.31.0-x86_64-unknown-linux-gnu": "5c4581f0fc05f0f5076db6231b0c1a4d27eb61c0b36bfb42d97243ad8f4e43a0", "rustc-1.31.1-aarch64-unknown-linux-gnu": "315ea9c981e4320a557f6c75b58242c0598a90316f610b4dfef5d06e82b927f2", "rustc-1.31.1-x86_64-apple-darwin": "e3f9c5ccd0e6e09da8012f30ee9a1880efebc0c039cc1f3866cf50c984be16a7", "rustc-1.31.1-x86_64-pc-windows-msvc": "0320b7544de463d4444c6445fd2e23044e28fde1173f614145a72a4bcfc6ccd9", 
"rustc-1.31.1-x86_64-unknown-freebsd": "fb38ad94976c273c0fb95d0b5ba2d1ce90684e58fa06fafc9f8050ba00559f50", "rustc-1.31.1-x86_64-unknown-linux-gnu": "77d47ce7e27a146e4301f11befd43f3fc5ac195ace0dfc07ac8154f130b057ea", "rustc-1.32.0-aarch64-unknown-linux-gnu": "193cbe67161e20a0bf4eeb8bafeb302f3e61a59ca939a0454fc3fbc76e9524cc", "rustc-1.32.0-x86_64-apple-darwin": "0334c4568f09cae984e53e4a3f4ff207e2bcc50fce13ad32b8eca89f014e5e61", "rustc-1.32.0-x86_64-pc-windows-msvc": "a7799495d3032c5ad6b5f712f7d7a9538f695c6d8d2e5258c0f7aadac8cea1d4", "rustc-1.32.0-x86_64-unknown-freebsd": "a14a0e288be8ce894a85810151a2eb70fc86afa36e4a5fae4e903c744b888687", "rustc-1.32.0-x86_64-unknown-linux-gnu": "75c31f32e19548c1608611d08b82b87560e02f15caac7b2663a8189a4609977c", "rustc-1.33.0-aarch64-unknown-linux-gnu": "e23141cc65d1d8e3957a96f3a601bdb7a9d09026ac20396aeaebd2613ea0d08e", "rustc-1.33.0-x86_64-apple-darwin": "ea1f0a95015bbefba9eac5890b12ee2887f464822ab579c8bbc2db3023c6dd08", "rustc-1.33.0-x86_64-pc-windows-msvc": "b935a78d072b9ae91ff8ddf9155df95d77fd8a1c6293e39df3c65b18d860320e", "rustc-1.33.0-x86_64-unknown-freebsd": "8bfc7fc50c50294cf4ded35360b41b590180401a0d2e84256f5931c7c1ff35cd", "rustc-1.33.0-x86_64-unknown-linux-gnu": "54a342f718b712d8a17fd7878ebd37d22a82ebc70b59c421168cd4153fd04c2b", "rustc-1.34.0-aarch64-unknown-linux-gnu": "364328a40c7aa5749be80b13a14466149a559205e34aef3d8823dc2580f55921", "rustc-1.34.0-x86_64-apple-darwin": "2044d44f01a8aa7fb3382f35fc839facfde4fc1eb6f951ead42aef954e317088", "rustc-1.34.0-x86_64-pc-windows-msvc": "371f9abd2bc615b339dfd606d93e6b4892594fd86084d513e07a9f80ff21a828", "rustc-1.34.0-x86_64-unknown-freebsd": "522662f147d0550e4f4f49026b4ebcc5e05a0935fa88acc9b99da5d7435755aa", "rustc-1.34.0-x86_64-unknown-linux-gnu": "5852e84dd30e4a552a7cd4d7c0172648d7ffb4d9ac7078871adbb902c183ffc2", "rustc-1.35.0-aarch64-unknown-linux-gnu": "dc06d77e6cdc06693d3b87ce473f151c96bda2c1e5dbba8c0354c54990c64fc2", "rustc-1.35.0-x86_64-apple-darwin": 
"5b2fb7581332f349c041860479ffdbfec0eebf87fc3016146836b8868afc3ae5", "rustc-1.35.0-x86_64-pc-windows-msvc": "df4f94d29d10fde2486d9fac3247a566d99a2b7f97fa6ebd416f308b804f7693", "rustc-1.35.0-x86_64-unknown-freebsd": "d3b5a6cfa41264e1873287bdb89892a7edc40333d581f468890c68336f50a601", "rustc-1.35.0-x86_64-unknown-linux-gnu": "bb3a07a1f2fdc3eeeee25fc40131d3f05494e3838dfd4e9275475ffc500d7a9e", "rustc-1.36.0-aarch64-unknown-linux-gnu": "62e40e0677032ae0cd91a7f8b4450dbaaf5223050a05b28a9174802d09691da6", "rustc-1.36.0-x86_64-apple-darwin": "97568272717ffa62dbf4459dff6086e69c808df252a912146e28468412667013", "rustc-1.36.0-x86_64-pc-windows-msvc": "4c131f68eac74bc20315eda097578c43de2b695445739462a4b273f90a131ffc", "rustc-1.36.0-x86_64-unknown-freebsd": "c2dd0cec49b054ed9439762fb31555b8df9a3d81747b194f7d3afbc6d8adb8de", "rustc-1.36.0-x86_64-unknown-linux-gnu": "7c149fa1695b41e8e1edcb95dca199522889f119be99f922741084d50470a9e5", "rustc-1.37.0-aarch64-unknown-linux-gnu": "721ba21dbe9b350a8c50a4c783c76ba3f6926525480518851dd6ba92ecdb042c", "rustc-1.37.0-x86_64-apple-darwin": "00d4d15b4d9a4d188e0db8bbc17cd5f0c3c3a87ad681e80ef15580c0d5bd4ff3", "rustc-1.37.0-x86_64-pc-windows-msvc": "790bdb5b57f397d7481151ad8715f7ac3f32b343efaf2922650f4fc6e374d7d7", "rustc-1.37.0-x86_64-unknown-freebsd": "a4dd357a0b39abf1ebbe8a0f64973c3b0c5bc527e374c12afe51266279fc1ca6", "rustc-1.37.0-x86_64-unknown-linux-gnu": "c759b318f333639a45f29c1551ca7ce55b1bf64e0fc3a3357d6b9356885d1626", "rustc-1.38.0-aarch64-unknown-linux-gnu": "0c787eaf01b5779b5a0c12bd0573901cf1b58e5e484ad44c3530b7ed51754d15", "rustc-1.38.0-x86_64-apple-darwin": "ac34aee5a5f67003b8f7f857ddb1fa68f89a32680a591ab77561282721b75256", "rustc-1.38.0-x86_64-pc-windows-msvc": "6e00ee5f34c552c1b9fafec3b7a1330140c820a2ae4bd4213d2c4f135341a88d", "rustc-1.38.0-x86_64-unknown-freebsd": "1d99318bbdc947c6dc375215f0eddcd767348c309811cd141e5d18e17d5aaaa4", "rustc-1.38.0-x86_64-unknown-linux-gnu": 
"790a611695fabd12c3a141efa58b3dc5913d749947c1a95d3f5b6eb5476ee612", "rustc-1.39.0-aarch64-unknown-linux-gnu": "c64fc482404277fdb160a4b593b0be5a1b0c32d985464595015295321d111621", "rustc-1.39.0-x86_64-apple-darwin": "9347ffb47e936fb44666ada525f8bfb86758a719e7c0330e93e17bbd5f3623be", "rustc-1.39.0-x86_64-pc-windows-msvc": "9a94785fdb473079d02f32bded6691322688001dcc16f5bfb582c1d181d3ef67", "rustc-1.39.0-x86_64-unknown-freebsd": "3714bf7bd4163a3bfe18291d49acaeda02f4bf2beb9fe36c520d2ecdc29ca031", "rustc-1.39.0-x86_64-unknown-linux-gnu": "333399dbf96dd6b8a9dc9cc56b1cb5d8aac2296b4e4aa857bd59d906d6df6fa1", "rustc-1.40.0-aarch64-unknown-linux-gnu": "8981d500261ecfec93c4b52e8f96a81c705b56ff9317d63e0363d11a72ee09a0", "rustc-1.40.0-x86_64-apple-darwin": "f45bb00a9a59ca819a8266e9de77f7232f4b704d64f1c45d3870e2db4f646a77", "rustc-1.40.0-x86_64-pc-windows-msvc": "16299638792b7bffb63ca20674a7196a33d1fb25e91083b90f8015be010eec19", "rustc-1.40.0-x86_64-unknown-freebsd": "65810804d3e4cf8f845978c6226f8e23d77a7ccf35ebafdd5f8dac027627f396", "rustc-1.40.0-x86_64-unknown-linux-gnu": "5085a26abdc932fd9339aab2078084f9ab654f8298ad9f301611ac41ba8eca19", "rustc-1.41.0-aarch64-unknown-linux-gnu": "9d994935f92088c968f520f558a88b140bb7d60e917fc4ad69019e2b830b1db7", "rustc-1.41.0-x86_64-apple-darwin": "25ee8865e21007c282cd1f3457c3bf932591337c3044e55ba574fc988bead3ad", "rustc-1.41.0-x86_64-pc-windows-msvc": "b338afb534be113f179252f8de29195e201dcd8bf4053b1d5e8eef928c457ca3", "rustc-1.41.0-x86_64-unknown-freebsd": "de3386f79a0e261b8f6133dc0d5a7d51b70ad73dba5a14dd30204ac285d04f3a", "rustc-1.41.0-x86_64-unknown-linux-gnu": "531b4cc77cc25e960aafa2ebaee073c137fceb0004447c6b7274557281c62a6d", "rustc-1.42.0-aarch64-unknown-linux-gnu": "612c10793852fd0c2e52b30f3d50dd6aef6f8181032b820eddefc93e3bf4d97b", "rustc-1.42.0-x86_64-apple-darwin": "778dea93d7e46261e2c06cadec35b68f9857604f279ce6fbd1b37c1a89634625", "rustc-1.42.0-x86_64-pc-windows-msvc": "d132f99df49cb0d421f6d8948a268d4eddb1ae23e0af2641272438998503708b", 
"rustc-1.42.0-x86_64-unknown-freebsd": "e6e36a7df9886b18cce32752f5ac7a8da6977c6a1878fae696340f3843176fe5", "rustc-1.42.0-x86_64-unknown-linux-gnu": "4242a728b850bf6e74db9a95c68e8ed316fa4813b38e6b8bc296396b5f47ea5a", "rustc-1.43.0-aarch64-unknown-linux-gnu": "99f26a2b4376fc08203d129d65e15f01b2630db40dd2d4d6a7b917df8d512e72", "rustc-1.43.0-x86_64-apple-darwin": "3723b8194e38d7238262b4cc49762a22037f53f58ab1df199c1d710dad5728a5", "rustc-1.43.0-x86_64-pc-windows-msvc": "c6d1aa60cf2056c4fb35a5a197fb4e1a42887eb4ad1615b00398524ff78ce74c", "rustc-1.43.0-x86_64-unknown-freebsd": "69d572e80e13da85599557f662ce71909823194c874eea0fe91f82da0958fa68", "rustc-1.43.0-x86_64-unknown-linux-gnu": "950b323044ae9a7932b697a2e4f4f62b59248f58faa320e22dc20f8ad9521f6b", "rustc-1.44.0-aarch64-unknown-linux-gnu": "b0fc4cee7119c10f79fe2701ca0d19ab738bd20954352ae5b1dcc4c6f432779a", "rustc-1.44.0-x86_64-apple-darwin": "4fd09afcae85f656d4a545ee415e19546e03e34f1ca23be5eaa68c489e3186ab", "rustc-1.44.0-x86_64-pc-windows-msvc": "0b3aec27d86034cbadf4adbaf36308bcf98d97c0979d162ffccf4328fb4f96cd", "rustc-1.44.0-x86_64-unknown-freebsd": "6f3c4e16bbda8719e5c07dc687e84a7236e097da55c4fabea13ef1cbd6a30c40", "rustc-1.44.0-x86_64-unknown-linux-gnu": "52671652e7045df0702d8f2e8af60bf6f20da3e3a5db1aa6022bf9545e914449", "rustc-1.45.0-aarch64-unknown-linux-gnu": "b1ef2ea19142d851f2ee6936cd46a30ec8f157ba53048bc2748279d1e9e0ad17", "rustc-1.45.0-x86_64-apple-darwin": "fd17d99c3e827f0b4f01b9122d4bf2fca0f1144827300a1eda93718d8642b39f", "rustc-1.45.0-x86_64-pc-windows-msvc": "f65fb383f2c6f979a19acbd4e099e6eea8addc0e76f1fd988582dfc0daa4a121", "rustc-1.45.0-x86_64-unknown-freebsd": "b5d263c53320f8a5dd5daceac1e60da172fd21614ada67f584565430d9d1c9c6", "rustc-1.45.0-x86_64-unknown-linux-gnu": "3ef2fcf818c133c3e9957441917b23ea536805efd0ff9ac6ee0bea349d703a90", "rustc-1.46.0-aarch64-unknown-linux-gnu": "41239ece19c79250a205e5b2fae60b242bba4bf72b687bccc88f011e66a872b6", "rustc-1.46.0-x86_64-apple-darwin": 
"f690b375df7b1399e5baa69b64932e3e4a3f2b651e5ef2ebc85509bee777a9d9", "rustc-1.46.0-x86_64-pc-windows-msvc": "56badce580b65f59d676b20b4e5f138969e5039182b7f6052ac7da9d38bd0aca", "rustc-1.46.0-x86_64-unknown-freebsd": "e76d3e18d1826753395d881bc37be3d43e9ff8d2d34d49d7ed6105f228d56284", "rustc-1.46.0-x86_64-unknown-linux-gnu": "4c0c740cfb86047ae8131019597f26382a9b8c289eab2f21069f74a5a4976a26", "rustc-1.47.0-aarch64-unknown-linux-gnu": "2e143bfa59eca5c3f3e995c5997ae55c7defe824fb4dbe7e77896e132f42c24b", "rustc-1.47.0-x86_64-apple-darwin": "4773ad46b912c859984f1e4466e506dd8102603d1ffcd8b63cfe7522f49e5987", "rustc-1.47.0-x86_64-pc-windows-msvc": "f2010e4500602d0efc431c0853692733415bedb58652376023d7d6ac204f8c7c", "rustc-1.47.0-x86_64-unknown-freebsd": "811f298c07fb32a6a01f9960f2d7dc403f6f288a3f475ed9806648e2cc5938ca", "rustc-1.47.0-x86_64-unknown-linux-gnu": "d96be0ae1deada01f41372ab2c2f485a9f8625069aeaff33c5b513061e9706d4", "rustc-1.48.0-aarch64-unknown-linux-gnu": "9c83a5d18f6ca913eeffd78c53913da288b171ff245137b646a8fd280fe72340", "rustc-1.48.0-x86_64-apple-darwin": "846f45f9bd6676e9d1f6758279b48e32564ba23773e69aa89692dbc123dbea5a", "rustc-1.48.0-x86_64-pc-windows-msvc": "395b2a8e6824b3e56a8a9b4598273be5410b4ea64e92c8aeaf900d9ff21f470f", "rustc-1.48.0-x86_64-unknown-freebsd": "fbaff313c2423f1ababc9792332560ca0e3749abf3749e7eb5289bc6515d9424", "rustc-1.48.0-x86_64-unknown-linux-gnu": "aa4a96b010e0d4573e6a1fec230beaadaae6cdce2bb4befeee7b1c081ee9ef8c", "rustc-1.49.0-aarch64-apple-darwin": "3e8c0c9101f27623f7607f2d8acef5f28dcb2bdfcded56f210d9d370cf9a9c06", "rustc-1.49.0-aarch64-unknown-linux-gnu": "b72699cdf74c03ccc0aabab937a69807f2ceb5861f3508593e1c222190c4efc7", "rustc-1.49.0-x86_64-apple-darwin": "09333f9aacb9c5959e2a2798d7e283cae674255f063a35ea28f91595caa0a78b", "rustc-1.49.0-x86_64-pc-windows-msvc": "800b7571438850074aeb0fb9a0e7d890c6785f9f4823b3052b9b0b098bb9ddd4", "rustc-1.49.0-x86_64-unknown-freebsd": "66427837606aba2cda99d4f52161bee1086e98b226a5cb99be8e9a7bf896495f", 
"rustc-1.49.0-x86_64-unknown-linux-gnu": "42300556b987934e5e4677972c1dfc57eb07731dc62fa9f4f561935a1c84ed0e", "rustfmt-1.4.20-aarch64-unknown-linux-gnu": "ff4e43883ee4419038b91ffea0cd18ee9450b056b9ff48cd8cab53bc37bc07cb", "rustfmt-1.4.20-x86_64-apple-darwin": "67cf0e46f629defb0faed1f98b50326d0220b22a93b3012f055070fae5e30005", "rustfmt-1.4.20-x86_64-pc-windows-msvc": "5292420e6c2943d74f2723a512f713e3f8c02012d465de4cbb40e4a38bc78988", "rustfmt-1.4.20-x86_64-unknown-freebsd": "43807828886baf511581114c4a912e08dcb94386b4b3e72a77d3ee7dad424803", "rustfmt-1.4.20-x86_64-unknown-linux-gnu": "dae81512815475e9e15f97c6aa511ae178cc90695e364f005087214296fb4928", "rustfmt-1.4.8-x86_64-apple-darwin": "9ff48a5a0ec693e28a3cf408019ba67544dea4b0ea119ad572c2f83d387d9ae5", "rustfmt-1.4.8-x86_64-pc-windows-msvc": "c2a03ccd03d507fefa21b6861cabf7033de64d276988e792f9e78ff5b12a26cd", "rustfmt-1.4.8-x86_64-unknown-linux-gnu": "4d6f813ef721821352a5e447ba1b6a69c04e2b43cec24d379e0c7a0528932d26", "rustfmt-1.48.0-aarch64-unknown-linux-gnu": "28f7d1ef37c034033eb0e30a13e5f0ad5bbc506adb8a8a9c03adce2b0d4842d5", "rustfmt-1.48.0-x86_64-apple-darwin": "cfe593a9446e7dfa52ded8a7cca174ba0c2d1cac6e865d04e0890282f25d22e2", "rustfmt-1.48.0-x86_64-pc-windows-msvc": "96d779befe8bca88d3cb69723d401d290a4a637746e8cc119126cfe9d5c773ee", "rustfmt-1.48.0-x86_64-unknown-freebsd": "ae84ca6d0841e6be0f140efd67693a1a50520e6610f26e5ee57a15b5a9947588", "rustfmt-1.48.0-x86_64-unknown-linux-gnu": "12d185cfd6ce15e4df3590bf1b9b3233df75e7aa14b42a9269b4235347a14b2b", "rustfmt-1.49.0-aarch64-apple-darwin": "4f03d2913ecff9b534bc6c2c7684d0884958a1c8f12668fea86c0aa4371231ae", "rustfmt-1.49.0-aarch64-unknown-linux-gnu": "9ef9c477911b3718539defa18ef5838b6f479e646d82e410643e5e8cb21791dc", "rustfmt-1.49.0-x86_64-apple-darwin": "e505092d5525dca1012d57e9c9dfd048cbbe2890e02e1327c1a0af44cd3d7aa1", "rustfmt-1.49.0-x86_64-pc-windows-msvc": "e094798983f77ef95e28db1c561915f992f3a190813162b33e2bc6942485a485", 
"rustfmt-1.49.0-x86_64-unknown-freebsd": "ed7465ddcc654b32822e48a8e91cd58391c36210b332f054a9ab5c1e5733ae74", "rustfmt-1.49.0-x86_64-unknown-linux-gnu": "a1b1a9c06b9958116c37e212c5e04d921f78967e9f9956f6249a16e033f67a03", }
115.110375
130
0.833608
9,012
104,290
9.57368
0.108522
0.037959
0.024479
0.026519
0.320962
0.311076
0.273476
0.136072
0.017525
0
0
0.443996
0.051903
104,290
905
131
115.237569
0.428583
0.001026
0
0
1
0
0.896329
0.896329
0
1
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
1
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
7
d9ea74428301d248b01bad9ff0096a5740710b96
29,237
py
Python
tests/unit_tests/test_michelson/test_micheline.py
konchunas/pytezos
65576d18bdf1956fae8ea21241b6c43a38921b83
[ "MIT" ]
98
2019-02-07T16:33:38.000Z
2022-03-31T15:53:41.000Z
tests/unit_tests/test_michelson/test_micheline.py
konchunas/pytezos
65576d18bdf1956fae8ea21241b6c43a38921b83
[ "MIT" ]
152
2019-05-20T16:38:56.000Z
2022-03-30T14:24:38.000Z
tests/unit_tests/test_michelson/test_micheline.py
konchunas/pytezos
65576d18bdf1956fae8ea21241b6c43a38921b83
[ "MIT" ]
34
2019-07-25T12:03:51.000Z
2021-11-11T22:23:38.000Z
from unittest import TestCase from parameterized import parameterized from pytezos.michelson.micheline import blind_unpack from pytezos.michelson.types.base import MichelsonType from pytezos.michelson.forge import forge_script_expr, forge_micheline, unforge_micheline from pytezos.operation.forge import forge_operation_group unknown_data = [ '0501000000056f776e6572', '050a000000160000e8b36c80efb51ec85a14562426049aa182a3ce38', '050100000006706175736564', '050303', '05010000000866616c6c6261636b', '0502000000270316031607430368010000001655706172616d4e6f53756368456e747279506f696e7403420327', '0501000000086e65774f776e6572', '050306', '0501000000096f70657261746f7273', '050200000000', '050100000009746f6b656e436f6465', '050100000005545a425443', '050100000009746f6b656e4e616d65', '050100000005545a425443', '05010000000b746f74616c4275726e6564', '050000', '05010000000b746f74616c4d696e746564', '050000', '05010000000b746f74616c537570706c79', '050000', '05010000000d72656465656d41646472657373', '050a000000160000e8b36c80efb51ec85a14562426049aa182a3ce38', '0507070100000004636f6465010000000863616c6c4275726e', 
'05020000054903210316051f02000000020317050d0362072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000c20321074303690a0000000f0501000000096f70657261746f72730329072f020000002507430368010000001a5553746f72653a206e6f206669656c64206f70657261746f727303270200000000050d0566036e072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b206f70657261746f72730327020000000003480339072c02000000000200000026074307650368036c0707010000001353656e64657249734e6f744f70657261746f72030b0327051f02000000960321074303690a0000001305010000000d72656465656d416464726573730329072f020000002907430368010000001e5553746f72653a206e6f206669656c642072656465656d4164647265737303270200000000050d036e072f02000000310743036801000000265553746f72653a206661696c656420746f20756e7061636b2072656465656d4164647265737303270200000000034203210316051f020000000203170321051f02000002a8034c0342051f02000000020321034c051f02000000020321034c03160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000290317074303620000034c03420743036801000000104e6f74456e6f75676842616c616e636503420327020000000003210316071f000202000000020321057000020317034c034b0356072f020000002e0316051f02000000020321034c031703420743036801000000104e6f74456e6f75676842616c616e6365034203270200000000051f020000000d0321051f020000000203170316034c03200342051f02000000020321034c051f02000000700321031603300325072c020000002603210317034503300325072c020000000e0320053e076503620760036e03620200000002034602000000020346034c03160743036801000000066c65646765720342030c051f0200000014072f0200000004053e03690200000004030c034603500317033b051f02000000900321074303690a0000001105010000000b746f74616c537570706c790329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c537570706c790327020000000
0050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c537570706c790327020000000003120356072f020000002a07430368010000001f496e7465726e616c3a204e6567617469766520746f74616c20737570706c7903270200000000030c0346074303690a0000001105010000000b746f74616c537570706c790350051f02000000900321074303690a0000001105010000000b746f74616c4275726e65640329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c4275726e656403270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c4275726e6564032702000000000312030c0346074303690a0000001105010000000b746f74616c4275726e65640350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000863616c6c4d696e74', '05020000042d03210316051f02000000020317050d0765036e0362072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000c20321074303690a0000000f0501000000096f70657261746f72730329072f020000002507430368010000001a5553746f72653a206e6f206669656c64206f70657261746f727303270200000000050d0566036e072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b206f70657261746f72730327020000000003480339072c02000000000200000026074307650368036c0707010000001353656e64657249734e6f744f70657261746f72030b03270321051f0200000232051f02000000020321034c051f02000000020321034c03160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000350321031703300325072c020000000c053e076503620760036e0362020000001503210317051f02000000060723036e0362034203460200000036051f02000000020321034c0317051f0200000004032103160312051f020000000d0321051f020000000203170316034c032003420346034c0321051f020000003203160743036801000000066c65646765720342030c051f0200000014072f0200000004053e03690
200000004030c0346035003170330051f02000000900321074303690a0000001105010000000b746f74616c537570706c790329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c537570706c7903270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c537570706c790327020000000003120356072f020000002a07430368010000001f496e7465726e616c3a204e6567617469766520746f74616c20737570706c7903270200000000030c0346074303690a0000001105010000000b746f74616c537570706c7903500317051f02000000900321074303690a0000001105010000000b746f74616c4d696e7465640329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c4d696e74656403270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c4d696e746564032702000000000312030c0346074303690a0000001105010000000b746f74616c4d696e7465640350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000963616c6c5061757365', '05020000014803210316051f02000000020317050d036c072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316032003170321074303690a0000000f0501000000096f70657261746f72730329072f020000002507430368010000001a5553746f72653a206e6f206669656c64206f70657261746f727303270200000000050d0566036e072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b206f70657261746f72730327020000000003480339072c02000000000200000026074307650368036c0707010000001353656e64657249734e6f744f70657261746f72030b032707430359030a030c0346074303690a0000000c0501000000067061757365640350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000b63616c6c417070726f7665', 
'05020000037103210316051f02000000020317050d0765036e0362072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703480342051f02000000b40321074303690a0000000c0501000000067061757365640329072f02000000220743036801000000175553746f72653a206e6f206669656c642070617573656403270200000000050d0359072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b2070617573656403270200000000072c0200000027034f074303680100000018546f6b656e4f7065726174696f6e73417265506175736564034203270200000000051f02000000020321034c051f02000000020321034c0321051f020000008703160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000060723036e036202000000020317031703160329072f02000000060743036200000200000000032103300325072c020000000203200200000043051f02000000020321034c0317031703300325072c020000000203200200000022074303680100000015556e73616665416c6c6f77616e63654368616e676503420327051f02000000020321034c051f020000000403210316034c0743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000140723036e036207430362000003420723036e0362020000000403210317071f0002020000000203210570000203170317032103300325072c02000000060320053e036202000000020346071f00030200000002032105700003031703160350051f020000000d0321051f020000000203160317034c0320034c0342034c03160743036801000000066c65646765720342030c051f0200000004030c03460350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000b63616c6c556e7061757365', 
'05020000013503210316051f02000000020317050d036c072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316032003170321074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000034803190325072c0200000000020000001f034f07430368010000001053656e64657249734e6f744f776e657203420327074303590303030c0346074303690a0000000c0501000000067061757365640350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000c63616c6c4765744f776e6572', '05020000016703210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f02000000350555036e072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000000c63616c6c5472616e73666572', 
'0502000008d203210316051f02000000020317050d0765036e0765036e0362072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000b40321074303690a0000000c0501000000067061757365640329072f02000000220743036801000000175553746f72653a206e6f206669656c642070617573656403270200000000050d0359072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b2070617573656403270200000000072c0200000027034f074303680100000018546f6b656e4f7065726174696f6e734172655061757365640342032702000000000321032103170316051f0200000002031603190325072c02000000020320020000078203210316034803190325072c020000000002000002790321051f02000002700321051f02000000b5051f020000000203210316034803420321051f020000008703170743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000060723036e03620200000002031703160329072f0200000006074303620000020000000003210316051f020000006b0348051f0200000060032103170317071f00020200000002032105700002034b0356072f020000003b051f02000000020321034c051f02000000020321034c0317031703420743036801000000124e6f74456e6f756768416c6c6f77616e636503420327020000000003420342051f020000000403200320051f02000000020321034c051f020000000403210316034c0743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000140723036e036207430362000003420723036e0362020000000403210317071f0002020000000203210570000203170317032103300325072c02000000060320053e036202000000020346071f00030200000002032105700003031703160350051f020000000d0321051f020000000203160317034c0320034c0342034c03160743036801000000066c65646765720342030c051f0200000004030c03460350051f02000000020321034c051f02000000020321034c031
703160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f020000003903210317031703300325072c020000000c053e076503620760036e03620200000017032103170317051f02000000060723036e0362034203460200000038051f02000000020321034c03170317051f0200000004032103160312051f020000000d0321051f020000000203170316034c032003420346034c0321051f0200000034031703160743036801000000066c65646765720342030c051f0200000014072f0200000004053e03690200000004030c034603500321051f02000000f7031703170330051f02000000900321074303690a0000001105010000000b746f74616c537570706c790329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c537570706c7903270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c537570706c790327020000000003120356072f020000002a07430368010000001f496e7465726e616c3a204e6567617469766520746f74616c20737570706c7903270200000000030c0346074303690a0000001105010000000b746f74616c537570706c790350051f02000000020321034c051f02000000020321034c03160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f020000002b03170317074303620000034c03420743036801000000104e6f74456e6f75676842616c616e636503420327020000000003210316071f0002020000000203210570000203170317034c034b0356072f02000000300316051f02000000020321034c0317031703420743036801000000104e6f74456e6f75676842616c616e6365034203270200000000051f020000000d0321051f020000000203170316034c03200342051f02000000020321034c051f02000000700321031603300325072c020000002603210317034503300325072c020000000e0320053e076503620760036e03620200000002034602000000020346034c03160743036801000000066c65646765720342030c051f0200000014072f0200000004053e03690200000004030c034603500317031
7033b051f02000000900321074303690a0000001105010000000b746f74616c537570706c790329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c537570706c7903270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c537570706c790327020000000003120356072f020000002a07430368010000001f496e7465726e616c3a204e6567617469766520746f74616c20737570706c7903270200000000030c0346074303690a0000001105010000000b746f74616c537570706c790350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000000e63616c6c47657442616c616e6365', '05020000017b03210316051f02000000020317050d0765036e036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550362072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c034203210316051f020000000203170743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f020000000607430362000002000000020316051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000000f63616c6c4164644f70657261746f72', 
'0502000001d903210316051f02000000020317050d036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000af0321074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000034803190325072c0200000000020000001f034f07430368010000001053656e64657249734e6f744f776e657203420327051f02000000920321074303690a0000000f0501000000096f70657261746f72730329072f020000002507430368010000001a5553746f72653a206e6f206669656c64206f70657261746f727303270200000000050d0566036e072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b206f70657261746f72730327020000000007430359030a0350030c0346074303690a0000000f0501000000096f70657261746f72730350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000001063616c6c476574416c6c6f77616e6365', '0502000001a003210316051f02000000020317050d07650765036e036e036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550362072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c034203210316051f020000000203170321051f020000008703160743036801000000066c65646765720342030c0329072f020000000c053e076503620760036e03620200000044050d076503620760036e0362072f020000002a07430368010000001f5553746f72653a206661696c656420746f20756e7061636b206c6564676572032702000000000346072f02000000060723036e03620200000002031703170329072f02000000060743036200000200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001063616c6c476574546f6b656e436f6465', 
'05020000017303210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550368072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000000f050100000009746f6b656e436f64650329072f020000002507430368010000001a5553746f72653a206e6f206669656c6420746f6b656e436f646503270200000000050d0368072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b20746f6b656e436f646503270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001063616c6c476574546f6b656e4e616d65', '05020000017303210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550368072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000000f050100000009746f6b656e4e616d650329072f020000002507430368010000001a5553746f72653a206e6f206669656c6420746f6b656e4e616d6503270200000000050d0368072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b20746f6b656e4e616d6503270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001263616c6c476574546f74616c4275726e6564', 
'05020000017903210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550362072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000001105010000000b746f74616c4275726e65640329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c4275726e656403270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c4275726e656403270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001263616c6c476574546f74616c4d696e746564', '05020000017903210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550362072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000001105010000000b746f74616c4d696e7465640329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c4d696e74656403270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c4d696e74656403270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001263616c6c476574546f74616c537570706c79', 
'05020000017903210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f020000003505550362072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000001105010000000b746f74616c537570706c790329072f020000002707430368010000001c5553746f72653a206e6f206669656c6420746f74616c537570706c7903270200000000050d0362072f020000002f0743036801000000245553746f72653a206661696c656420746f20756e7061636b20746f74616c537570706c7903270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001263616c6c52656d6f76654f70657261746f72', '0502000001d903210316051f02000000020317050d036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000af0321074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000034803190325072c0200000000020000001f034f07430368010000001053656e64657249734e6f744f776e657203420327051f02000000920321074303690a0000000f0501000000096f70657261746f72730329072f020000002507430368010000001a5553746f72653a206e6f206669656c64206f70657261746f727303270200000000050d0566036e072f020000002d0743036801000000225553746f72653a206661696c656420746f20756e7061636b206f70657261746f7273032702000000000743035903030350030c0346074303690a0000000f0501000000096f70657261746f72730350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000001363616c6c4163636570744f776e657273686970', 
'05020000025003210316051f02000000020317050d036c072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316032003170321074303690a0000000e0501000000086e65774f776e65720329072f02000000240743036801000000195553746f72653a206e6f206669656c64206e65774f776e657203270200000000050d0563036e072f020000002c0743036801000000215553746f72653a206661696c656420746f20756e7061636b206e65774f776e657203270200000000072f0200000029034f07430368010000001a4e6f74496e5472616e736665724f776e6572736869704d6f6465034203270200000034034803190325072c02000000000200000022034f07430368010000001353656e64657249734e6f744e65774f776e6572034203270321074303690a0000000e0501000000086e65774f776e65720329072f02000000240743036801000000195553746f72653a206e6f206669656c64206e65774f776e657203270200000000050d0563036e072f020000002c0743036801000000215553746f72653a206661696c656420746f20756e7061636b206e65774f776e657203270200000000072f0200000029034f07430368010000001a4e6f74496e5472616e736665724f776e6572736869704d6f646503420327020000003b030c0346074303690a0000000b0501000000056f776e65720350053e036e030c0346074303690a0000000e0501000000086e65774f776e65720350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000001463616c6c47657452656465656d41646472657373', 
'05020000017f03210316051f02000000020317050d0765036c036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f0200000002031703210316051f02000000020317051f02000000350555036e072f0200000025034f074303680100000016556e6578706563746564436f6e747261637454797065034203270200000000034203210316051f02000000020317051f020000000b051f02000000020321034c03420317074303690a0000001305010000000d72656465656d416464726573730329072f020000002907430368010000001e5553746f72653a206e6f206669656c642072656465656d4164647265737303270200000000050d036e072f02000000310743036801000000265553746f72653a206661696c656420746f20756e7061636b2072656465656d4164647265737303270200000000051f02000000020313034d053d036d034c031b034203210316051f020000000203170342', '0507070100000004636f6465010000001463616c6c53657452656465656d41646472657373', '05020000014203210316051f02000000020317050d036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000af0321074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000034803190325072c0200000000020000001f034f07430368010000001053656e64657249734e6f744f776e657203420327030c0346074303690a0000001305010000000d72656465656d416464726573730350053d036d034203210316051f020000000203170342', '0507070100000004636f6465010000001563616c6c5472616e736665724f776e657273686970', 
'05020000013f03210316051f02000000020317050d036e072f0200000029034f07430368010000001a55706172616d417267756d656e74556e7061636b4661696c6564034203270200000000034203210316051f02000000020317051f02000000af0321074303690a0000000b0501000000056f776e65720329072f02000000210743036801000000165553746f72653a206e6f206669656c64206f776e657203270200000000050d036e072f020000002907430368010000001e5553746f72653a206661696c656420746f20756e7061636b206f776e657203270200000000034803190325072c0200000000020000001f034f07430368010000001053656e64657249734e6f744f776e6572034203270346030c0346074303690a0000000e0501000000086e65774f776e65720350053d036d034203210316051f020000000203170342'] class TestPacking(TestCase): @parameterized.expand([ ({"bytes": "000018896fcfc6690baefa9aedc6d759f9bf05727e8c"}, {"prim": "address"}, "expru2YV8AanTTUSV4K21P7X4DzbuWQFVk7NewDuP1A5uamffiiFA3"), ({"string": "tz1MsmYzmqxHs9trE1qQugZxxcLPqAXdQaX9"}, {"prim": "address"}, "expru2YV8AanTTUSV4K21P7X4DzbuWQFVk7NewDuP1A5uamffiiFA3"), ({"string": "Game one!"}, {"prim": "string"}, "exprtiRSZkLKYRess9GZ3ryb4cVQD36WLo2oysZBFxKTZ2jXqcHWGj"), ({"int": "505506"}, {"prim": "int"}, "exprufzwVGdAX7zG91UpiAkR2yVxEDE75tHD5YgSBmYMUx22teZTCM"), ([{"int": "1"}, {"int": "1"}, {"int": "1"}, {"int": "1"}], {"prim": "pair", "args": [{"prim": "int"}, {"prim": "int"}, {"prim": "int"}, {"prim": "int"}]}, "expruN32WETsB2Dx1AynDmMufVr1As9qdnjRxKQ82rk2qZ4uxuKVMK") ]) def test_get_key_hash(self, val_expr, type_expr, expected): ty = MichelsonType.match(type_expr) key = ty.from_micheline_value(val_expr).pack(legacy=True) self.assertEqual(expected, forge_script_expr(key)) @parameterized.expand([(x,) for x in unknown_data]) def test_blind_unpack(self, data): data = bytes.fromhex(data) res = blind_unpack(data) self.assertNotEqual(data, res) def test_regr_local_remote_diff(self): opg = {'branch': 'BKpLvH3E3bUa5Z2nb3RkH2p6EKLfymvxUAEgtRJnu4m9UX1TWUb', 'contents': [{'amount': '0', 'counter': '446245', 'destination': 'KT1VYUxhLoSvouozCaDGL1XcswnagNfwr3yi', 'fee': 
'104274', 'gas_limit': '1040000', 'kind': 'transaction', 'parameters': {'entrypoint': 'default', 'value': {'prim': 'Unit'}}, 'source': 'tz1grSQDByRpnVs7sPtaprNZRp531ZKz6Jmm', 'storage_limit': '60000'}], 'protocol': 'PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb', 'signature': None} local = forge_operation_group(opg).hex() remote = "0dc397b7865779d87bd47d406e8b4eee84498f22ab01dff124433c7f057af5ae6c00e8b36c80efb51ec85a1456" \ "2426049aa182a3ce38d2ae06a59e1b80bd3fe0d4030001e5ebf2dcc7dcc9d13c2c45cd76823dd604740c7f0000" self.assertEqual(remote, local) def test_forge_combs(self): expr = {'prim': 'Pair', 'args': [{'int': '1'}, {'int': '2'}, {'int': '3'}, {'int': '4'}]} self.assertEqual(expr, unforge_micheline(forge_micheline(expr)))
230.212598
4,535
0.942949
288
29,237
95.604167
0.559028
0.001271
0.001017
0.000872
0.001598
0.001598
0
0
0
0
0
0.848631
0.036734
29,237
126
4,536
232.039683
0.129035
0
0
0.094017
0
0
0.914663
0.902145
0
1
0
0
0.034188
1
0.034188
false
0
0.051282
0
0.094017
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
8
d9f34b9314fd8e3a689ab67f5b71e7ba87a6d47f
102
py
Python
tests/test_version.py
cariad/stackwhy
5d5f0764ab86740fbbc3ae25170d149b388b949c
[ "MIT" ]
null
null
null
tests/test_version.py
cariad/stackwhy
5d5f0764ab86740fbbc3ae25170d149b388b949c
[ "MIT" ]
4
2021-10-31T15:52:31.000Z
2021-11-01T13:06:27.000Z
tests/test_version.py
cariad/stackwhy
5d5f0764ab86740fbbc3ae25170d149b388b949c
[ "MIT" ]
null
null
null
from stackwhy.version import get_version def test() -> None: assert get_version() == "-1.-1.-1"
17
40
0.656863
15
102
4.333333
0.666667
0.307692
0
0
0
0
0
0
0
0
0
0.035714
0.176471
102
5
41
20.4
0.738095
0
0
0
0
0
0.078431
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
8a1b8e369c389b5a34630895043adf43e528b743
45,947
py
Python
plugin.video.rebirth/resources/lib/modules/cleangenre.py
TheWardoctor/wardoctors-repo
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
[ "Apache-2.0" ]
1
2019-03-05T09:38:10.000Z
2019-03-05T09:38:10.000Z
plugin.video.rebirth/resources/lib/modules/cleangenre.py
TheWardoctor/wardoctors-repo
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
[ "Apache-2.0" ]
null
null
null
plugin.video.rebirth/resources/lib/modules/cleangenre.py
TheWardoctor/wardoctors-repo
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
[ "Apache-2.0" ]
1
2021-11-05T20:48:09.000Z
2021-11-05T20:48:09.000Z
# -*- coding: utf-8 -*- ################################################################################ # | # # | ______________________________________________________________ # # | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| # # | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| # # | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| # # | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| # # | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| # # | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| # # | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| # # | 8888888888888888888888888888......8888888888888888888888888888| # # | ..............................................................| # # | ..............................................................| # # | 8888888888888888888888888888......8888888888888888888888888888| # # | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| # # | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| # # | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| # # | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| # # | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| # # | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| # # | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| # # | # # | Rebirth Addon # # | Copyright (C) 2017 Cypher # # | # # | This program is free software: you can redistribute it and/or modify # # | it under the terms of the GNU General Public License as published by # # | the Free Software Foundation, either version 3 of the License, or # # | (at your option) any later version. # # | # # | This program is distributed in the hope that it will be useful, # # | but WITHOUT ANY WARRANTY; without even the implied warranty of # # | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # # | GNU General Public License for more details. # # | # ################################################################################ def lang(i, lang): if lang == 'bg': i = i.replace('Action', u'\u0415\u043a\u0448\u044a\u043d') i = i.replace('Adventure', u'\u041f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435') i = i.replace('Animation', u'\u0410\u043d\u0438\u043c\u0430\u0446\u0438\u044f') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u044f') i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438\u043d\u0430\u043b\u0435\u043d') i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u0435\u043d') i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430') i = i.replace('Family', u'\u0421\u0435\u043c\u0435\u0435\u043d') i = i.replace('Fantasy', u'\u0424\u0435\u043d\u0442\u044a\u0437\u0438') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0447\u0435\u0441\u043a\u0438') i = i.replace('Horror', u'\u0423\u0436\u0430\u0441') i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u043a\u0430') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u044f') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0420\u043e\u043c\u0430\u043d\u0441') i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u044a\u0440') i = i.replace('War', u'\u0412\u043e\u0435\u043d\u0435\u043d') i = i.replace('Western', 
u'\u0423\u0435\u0441\u0442\u044a\u0440\u043d') elif lang == 'cs': i = i.replace('Action', u'\u0041\u006b\u010d\u006e\u00ed') i = i.replace('Adventure', u'\u0044\u006f\u0062\u0072\u006f\u0064\u0072\u0075\u017e\u006e\u00fd') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u006f\u0076\u0061\u006e\u00fd') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065') i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u006e\u00ed') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0052\u006f\u0064\u0069\u006e\u006e\u00fd') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0063\u006b\u00fd') i = i.replace('Horror', u'\u0048\u006f\u0072\u006f\u0072') i = i.replace('Music ', u'\u0048\u0075\u0064\u0065\u0062\u006e\u00ed') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u00f3\u007a\u006e\u00ed') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063\u006b\u00fd') i = i.replace('Science Fiction', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd') i = i.replace('Sci-Fi', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0056\u00e1\u006c\u0065\u010d\u006e\u00fd') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'da': i = i.replace('Action', 
u'\u0041\u0063\u0074\u0069\u006f\u006e') i = i.replace('Adventure', u'\u0045\u0076\u0065\u006e\u0074\u0079\u0072') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065') i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u0069\u0074\u0065\u0074') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0079') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065 ') i = i.replace('Horror', u'\u0047\u0079\u0073\u0065\u0072') i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0075\u006d') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0066\u0069') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0066\u0069') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u004b\u0072\u0069\u0067') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'de': i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e') i = i.replace('Adventure', u'\u0041\u0062\u0065\u006e\u0074\u0065\u0075\u0065\u0072') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e') i = i.replace('Anime', u'Anime') i = i.replace('Biography', 
u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u00f6\u0064\u0069\u0065') i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0066\u0069\u006c\u006d') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0079') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u004c\u006f\u0076\u0065\u0073\u0074\u006f\u0072\u0079') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u004b\u0072\u0069\u0065\u0067\u0073\u0066\u0069\u006c\u006d') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'el': i = i.replace('Action', u'\u0394\u03c1\u03ac\u03c3\u03b7') i = i.replace('Adventure', u'\u03a0\u03b5\u03c1\u03b9\u03c0\u03ad\u03c4\u03b5\u03b9\u03b1') i = i.replace('Animation', u'\u039a\u03b9\u03bd\u03bf\u03cd\u03bc\u03b5\u03bd\u03b1 \u03a3\u03c7\u03ad\u03b4\u03b9\u03b1') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'\u0392\u03b9\u03bf\u03b3\u03c1\u03b1\u03c6\u03b9\u03ba\u03ae') i = i.replace('Comedy', 
u'\u039a\u03c9\u03bc\u03c9\u03b4\u03af\u03b1') i = i.replace('Crime', u'\u0391\u03c3\u03c4\u03c5\u03bd\u03bf\u03bc\u03b9\u03ba\u03ae') i = i.replace('Documentary', u'\u039d\u03c4\u03bf\u03ba\u03c5\u03bc\u03b1\u03bd\u03c4\u03ad\u03c1') i = i.replace('Drama', u'\u0394\u03c1\u03ac\u03bc\u03b1') i = i.replace('Family', u'\u039f\u03b9\u03ba\u03bf\u03b3\u03b5\u03bd\u03b5\u03b9\u03b1\u03ba\u03ae') i = i.replace('Fantasy', u'\u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2') i = i.replace('Game-Show', u'\u03a4\u03b7\u03bb\u03b5\u03c0\u03b1\u03b9\u03c7\u03bd\u03af\u03b4\u03b9') i = i.replace('History', u'\u0399\u03c3\u03c4\u03bf\u03c1\u03b9\u03ba\u03ae') i = i.replace('Horror', u'\u03a4\u03c1\u03cc\u03bc\u03bf\u03c5') i = i.replace('Music ', u'\u039c\u03bf\u03c5\u03c3\u03b9\u03ba\u03ae') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u039c\u03c5\u03c3\u03c4\u03b7\u03c1\u03af\u03bf\u03c5') i = i.replace('News', u'\u0395\u03b9\u03b4\u03ae\u03c3\u03b5\u03b9\u03c2') i = i.replace('Reality-TV', u'\u03a1\u03b9\u03ac\u03bb\u03b9\u03c4\u03c5') i = i.replace('Romance', u'\u03a1\u03bf\u03bc\u03b1\u03bd\u03c4\u03b9\u03ba\u03ae') i = i.replace('Science Fiction', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2') i = i.replace('Sci-Fi', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2') i = i.replace('Sport', u'\u0391\u03b8\u03bb\u03b7\u03c4\u03b9\u03ba\u03ae') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0398\u03c1\u03af\u03bb\u03b5\u03c1') i = i.replace('War', u'\u03a0\u03bf\u03bb\u03b5\u03bc\u03b9\u03ba\u03ae') i = i.replace('Western', u'\u0393\u03bf\u03c5\u03ad\u03c3\u03c4\u03b5\u03c1\u03bd') elif lang == 'es': i = i.replace('Action', u'\u0041\u0063\u0063\u0069\u00f3\u006e') i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u00f3\u006e') i = i.replace('Anime', u'Anime') i = 
i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0061') i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065\u006e') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u006c') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u00ed\u0061') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0061') i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006f') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065') i = i.replace('Science Fiction', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e') i = i.replace('Sci-Fi', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0053\u0075\u0073\u0070\u0065\u006e\u0073\u0065') i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'fr': i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e') i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0065') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0065') i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065') i = i.replace('Documentary', 
u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0065') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061\u006c') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0071\u0075\u0065') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0069\u0072\u0065') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u0065\u0075\u0072') i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0071\u0075\u0065') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u00e8\u0072\u0065') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0065') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'he': i = i.replace('Action', u'\u05d0\u05e7\u05e9\u05df') i = i.replace('Adventure', u'\u05d4\u05e8\u05e4\u05ea\u05e7\u05d0\u05d5\u05ea') i = i.replace('Animation', u'\u05d0\u05e0\u05d9\u05de\u05e6\u05d9\u05d4') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u05e7\u05d5\u05de\u05d3\u05d9\u05d4') i = i.replace('Crime', u'\u05e4\u05e9\u05e2') i = i.replace('Documentary', u'\u05d3\u05d5\u05e7\u05d5\u05de\u05e0\u05d8\u05e8\u05d9') i = i.replace('Drama', u'\u05d3\u05e8\u05de\u05d4') i = i.replace('Family', u'\u05de\u05e9\u05e4\u05d7\u05d4') i = i.replace('Fantasy', 
u'\u05e4\u05e0\u05d8\u05d6\u05d9\u05d4') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u05d4\u05e1\u05d8\u05d5\u05e8\u05d9\u05d4') i = i.replace('Horror', u'\u05d0\u05d9\u05de\u05d4') i = i.replace('Music ', u'\u05de\u05d5\u05e1\u05d9\u05e7\u05d4') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u05de\u05e1\u05ea\u05d5\u05e8\u05d9\u05df') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u05e8\u05d5\u05de\u05e0\u05d8\u05d9') i = i.replace('Science Fiction', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9') i = i.replace('Sci-Fi', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u05de\u05d5\u05ea\u05d7\u05df') i = i.replace('War', u'\u05de\u05dc\u05d7\u05de\u05d4') i = i.replace('Western', u'\u05de\u05e2\u05e8\u05d1\u05d5\u05df') elif lang == 'hu': i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u00f3') i = i.replace('Adventure', u'\u004b\u0061\u006c\u0061\u006e\u0064') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u00e1\u0063\u0069\u00f3\u0073') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0056\u00ed\u0067\u006a\u00e1\u0074\u00e9\u006b') i = i.replace('Crime', u'\u0042\u0171\u006e\u00fc\u0067\u0079\u0069') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0075\u006d') i = i.replace('Drama', u'\u0044\u0072\u00e1\u006d\u0061') i = i.replace('Family', u'\u0043\u0073\u0061\u006c\u00e1\u0064\u0069') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0054\u00f6\u0072\u0074\u00e9\u006e\u0065\u006c\u006d\u0069') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u005a\u0065\u006e\u0065\u0069') i = i.replace('Musical', u'Musical') i = 
i.replace('Mystery', u'\u0052\u0065\u006a\u0074\u00e9\u006c\u0079') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0075\u0073') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0048\u00e1\u0062\u006f\u0072\u00fa\u0073') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'it': i = i.replace('Action', u'\u0041\u007a\u0069\u006f\u006e\u0065') i = i.replace('Adventure', u'\u0041\u0076\u0076\u0065\u006e\u0074\u0075\u0072\u0061') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u007a\u0069\u006f\u006e\u0065') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0043\u006f\u006d\u006d\u0065\u0064\u0069\u0061') i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0069\u006f') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u0067\u006c\u0069\u0061') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0053\u0074\u006f\u0072\u0069\u0061') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063\u0061') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u006f') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065') i = i.replace('Science Fiction', 
u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061') i = i.replace('Sci-Fi', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'ja': i = i.replace('Action', u'\u30a2\u30af\u30b7\u30e7\u30f3') i = i.replace('Adventure', u'\u30a2\u30c9\u30d9\u30f3\u30c1\u30e3\u30fc') i = i.replace('Animation', u'\u30a2\u30cb\u30e1\u30fc\u30b7\u30e7\u30f3') i = i.replace('Anime', u'\u30a2\u30cb\u30e1') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u30b3\u30e1\u30c7\u30a3') i = i.replace('Crime', u'\u72af\u7f6a') i = i.replace('Documentary', u'\u30c9\u30ad\u30e5\u30e1\u30f3\u30bf\u30ea\u30fc') i = i.replace('Drama', u'\u30c9\u30e9\u30de') i = i.replace('Family', u'\u30d5\u30a1\u30df\u30ea\u30fc') i = i.replace('Fantasy', u'\u30d5\u30a1\u30f3\u30bf\u30b8\u30fc') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u5c65\u6b74') i = i.replace('Horror', u'\u30db\u30e9\u30fc') i = i.replace('Music ', u'\u97f3\u697d') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u8b0e') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u30ed\u30de\u30f3\u30b9') i = i.replace('Science Fiction', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3') i = i.replace('Sci-Fi', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u30b9\u30ea\u30e9\u30fc') i = i.replace('War', u'\u6226\u4e89') i = i.replace('Western', u'\u897f\u6d0b') elif lang == 'ko': i = i.replace('Action', u'\uc561\uc158') i = i.replace('Adventure', 
u'\ubaa8\ud5d8') i = i.replace('Animation', u'\uc560\ub2c8\uba54\uc774\uc158') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\ucf54\ubbf8\ub514') i = i.replace('Crime', u'\ubc94\uc8c4') i = i.replace('Documentary', u'\ub2e4\ud050\uba58\ud130\ub9ac') i = i.replace('Drama', u'\ub4dc\ub77c\ub9c8') i = i.replace('Family', u'\uac00\uc871') i = i.replace('Fantasy', u'\ud310\ud0c0\uc9c0') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\uc5ed\uc0ac') i = i.replace('Horror', u'\uacf5\ud3ec') i = i.replace('Music ', u'\uc74c\uc545') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\ubbf8\uc2a4\ud130\ub9ac') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\ub85c\ub9e8\uc2a4') i = i.replace('Science Fiction', u'\u0053\u0046') i = i.replace('Sci-Fi', u'\u0053\u0046') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\uc2a4\ub9b4\ub7ec') i = i.replace('War', u'\uc804\uc7c1') i = i.replace('Western', u'\uc11c\ubd80') elif lang == 'nl': i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u0065') i = i.replace('Adventure', u'\u0041\u0076\u006f\u006e\u0074\u0075\u0075\u0072') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u0065') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065') i = i.replace('Crime', u'\u004d\u0069\u0073\u0064\u0061\u0061\u0064') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0065') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', 
u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u0063\u0068') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0065\u006b') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0065') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0065\u006b') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u004f\u006f\u0072\u006c\u006f\u0067') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'pl': i = i.replace('Action', u'\u0041\u006b\u0063\u006a\u0061') i = i.replace('Adventure', u'\u0050\u0072\u007a\u0079\u0067\u006f\u0064\u006f\u0077\u0079') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u006a\u0061') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0061') i = i.replace('Crime', u'\u004b\u0072\u0079\u006d\u0069\u006e\u0061\u0142') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u006c\u006e\u0079') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061\u0074') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u006a\u006e\u0079') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0079\u0063\u007a\u006e\u0079') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = 
i.replace('Music ', u'\u004d\u0075\u007a\u0079\u0063\u007a\u006e\u0079') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u0054\u0061\u006a\u0065\u006d\u006e\u0069\u0063\u0061') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0073') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0057\u006f\u006a\u0065\u006e\u006e\u0079') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'pt': i = i.replace('Action', u'\u0041\u00e7\u00e3\u006f') i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u00e7\u00e3\u006f') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0061') i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u0069\u006f') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u00ed\u006c\u0069\u0061') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0061') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u00f3\u0072\u0069\u0061') i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u00e9\u0072\u0069\u006f') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', 
u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065') i = i.replace('Science Fiction', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061') i = i.replace('Sci-Fi', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061') i = i.replace('Western', u'\u0046\u0061\u0072\u006f\u0065\u0073\u0074\u0065') elif lang == 'ro': i = i.replace('Action', u'\u0041\u0063\u021b\u0069\u0075\u006e\u0065') i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0069') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0163\u0069\u0065') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0065') i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0103') i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0103') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0049\u0073\u0074\u006f\u0072\u0069\u0063') i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072') i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0063\u0103') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063') i = i.replace('Science Fiction', u'\u0053\u0046') i = i.replace('Sci-Fi', u'\u0053\u0046') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', 
u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u0052\u0103\u007a\u0062\u006f\u0069') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'ru': i = i.replace('Action', u'\u0431\u043e\u0435\u0432\u0438\u043a') i = i.replace('Adventure', u'\u043f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u044f') i = i.replace('Animation', u'\u043c\u0443\u043b\u044c\u0442\u0444\u0438\u043b\u044c\u043c') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u043a\u043e\u043c\u0435\u0434\u0438\u044f') i = i.replace('Crime', u'\u043a\u0440\u0438\u043c\u0438\u043d\u0430\u043b') i = i.replace('Documentary', u'\u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u044c\u043d\u044b\u0439') i = i.replace('Drama', u'\u0434\u0440\u0430\u043c\u0430') i = i.replace('Family', u'\u0441\u0435\u043c\u0435\u0439\u043d\u044b\u0439') i = i.replace('Fantasy', u'\u0444\u044d\u043d\u0442\u0435\u0437\u0438') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0438\u0441\u0442\u043e\u0440\u0438\u044f') i = i.replace('Horror', u'\u0443\u0436\u0430\u0441\u044b') i = i.replace('Music ', u'\u043c\u0443\u0437\u044b\u043a\u0430') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u0434\u0435\u0442\u0435\u043a\u0442\u0438\u0432') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u043c\u0435\u043b\u043e\u0434\u0440\u0430\u043c\u0430') i = i.replace('Science Fiction', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sci-Fi', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0442\u0440\u0438\u043b\u043b\u0435\u0440') i = i.replace('War', u'\u0432\u043e\u0435\u043d\u043d\u044b\u0439') i = i.replace('Western', 
u'\u0432\u0435\u0441\u0442\u0435\u0440\u043d') elif lang == 'sl': i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u006a\u0061') i = i.replace('Adventure', u'\u0041\u0076\u0061\u006e\u0074\u0075\u0072\u0061') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u006a\u0061') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0064\u0069\u006a\u0061') i = i.replace('Crime', u'\u041a\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u006e\u0069') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u006e\u0069') i = i.replace('Drama', u'\u0044\u0072\u0430\u043c\u0430') i = i.replace('Family', u'\u0044\u0072\u0075\u017e\u0069\u006e\u0073\u006b\u0069') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u005a\u0067\u006f\u0064\u006f\u0076\u0069\u006e\u0073\u006b\u0069') i = i.replace('Horror', u'\u0047\u0072\u006f\u007a\u006c\u006a\u0069\u0076\u006b\u0061') i = i.replace('Music ', u'\u0047\u006c\u0061\u007a\u0062\u0065\u006e\u0069') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006a\u0061') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0061') i = i.replace('Science Fiction', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061') i = i.replace('Sci-Fi', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0422\u0072\u0069\u006c\u0065\u0072') i = i.replace('War', 
u'\u0056\u006f\u006a\u006e\u006f\u002d\u0070\u006f\u006c\u0069\u0074\u0069\u010d\u006e\u0069') i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e') elif lang == 'sr': i = i.replace('Action', u'\u0410\u043a\u0446\u0438\u043e\u043d\u0438') i = i.replace('Adventure', u'\u0410\u0432\u0430\u043d\u0442\u0443\u0440\u0438\u0441\u0442\u0438\u0447\u043a\u0438') i = i.replace('Animation', u'\u0426\u0440\u0442\u0430\u043d\u0438') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u0458\u0430') i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438') i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u0440\u043d\u0438') i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430') i = i.replace('Family', u'\u041f\u043e\u0440\u043e\u0434\u0438\u0447\u043d\u0438') i = i.replace('Fantasy', u'\u0424\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0458\u0441\u043a\u0438') i = i.replace('Horror', u'\u0425\u043e\u0440\u043e\u0440') i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u0447\u043a\u0438') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u0458\u0430') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0409\u0443\u0431\u0430\u0432\u043d\u0438') i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u0435\u0440') i = i.replace('War', u'\u0420\u0430\u0442\u043d\u0438') i = i.replace('Western', 
u'\u0412\u0435\u0441\u0442\u0435\u0440\u043d') elif lang == 'sv': i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e') i = i.replace('Adventure', u'\u00c4\u0076\u0065\u006e\u0074\u0079\u0072') i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0065\u0072\u0061\u0074') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069') i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c') i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e4\u0072') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061') i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u006a') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u006b') i = i.replace('Horror', u'\u0053\u006b\u0072\u00e4\u0063\u006b') i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0069\u006b') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b') i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072') i = i.replace('War', u'\u004b\u0072\u0069\u0067') i = i.replace('Western', u'\u0056\u00e4\u0073\u0074\u0065\u0072\u006e') elif lang == 'tr': i = i.replace('Action', u'\u0041\u006b\u0073\u0069\u0079\u006f\u006e') i = i.replace('Adventure', u'\u004d\u0061\u0063\u0065\u0072\u0061') i = i.replace('Animation', 
u'\u0041\u006e\u0069\u006d\u0061\u0073\u0079\u006f\u006e') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069') i = i.replace('Crime', u'\u0053\u0075\u00e7') i = i.replace('Documentary', u'\u0042\u0065\u006c\u0067\u0065\u0073\u0065\u006c') i = i.replace('Drama', u'\u0044\u0072\u0061\u006d') i = i.replace('Family', u'\u0041\u0069\u006c\u0065') i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b') i = i.replace('Game-Show', u'Game-Show') i = i.replace('History', u'\u0054\u0061\u0072\u0069\u0068') i = i.replace('Horror', u'\u004b\u006f\u0072\u006b\u0075') i = i.replace('Music ', u'\u004d\u00fc\u007a\u0069\u006b') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u0047\u0069\u007a\u0065\u006d') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b') i = i.replace('Science Fiction', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075') i = i.replace('Sci-Fi', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u0047\u0065\u0072\u0069\u006c\u0069\u006d') i = i.replace('War', u'\u0053\u0061\u0076\u0061\u015f') i = i.replace('Western', u'\u0056\u0061\u0068\u015f\u0069 \u0042\u0061\u0074\u0131') elif lang == 'zh': i = i.replace('Action', u'\u52a8\u4f5c') i = i.replace('Adventure', u'\u5192\u9669') i = i.replace('Animation', u'\u52a8\u753b') i = i.replace('Anime', u'Anime') i = i.replace('Biography', u'Biography') i = i.replace('Comedy', u'\u559c\u5267') i = i.replace('Crime', u'\u72af\u7f6a') i = i.replace('Documentary', u'\u7eaa\u5f55') i = i.replace('Drama', u'\u5267\u60c5') i = i.replace('Family', u'\u5bb6\u5ead') i = i.replace('Fantasy', u'\u5947\u5e7b') i = i.replace('Game-Show', u'Game-Show') i = 
i.replace('History', u'\u5386\u53f2') i = i.replace('Horror', u'\u6050\u6016') i = i.replace('Music ', u'\u97f3\u4e50') i = i.replace('Musical', u'Musical') i = i.replace('Mystery', u'\u60ac\u7591') i = i.replace('News', u'News') i = i.replace('Reality-TV', u'Reality-TV') i = i.replace('Romance', u'\u7231\u60c5') i = i.replace('Science Fiction', u'\u79d1\u5e7b') i = i.replace('Sci-Fi', u'\u79d1\u5e7b') i = i.replace('Sport', u'Sport') i = i.replace('Talk-Show', u'Talk-Show') i = i.replace('Thriller', u'\u60ca\u609a') i = i.replace('War', u'\u6218\u4e89') i = i.replace('Western', u'\u897f\u90e8') return i
65.17305
166
0.60276
6,640
45,947
4.161596
0.063404
0.042992
0.193464
0.024319
0.832845
0.764629
0.735642
0.70803
0.686209
0.667427
0
0.296775
0.184212
45,947
704
167
65.265625
0.44044
0.056543
0
0.443366
0
0.088997
0.558793
0.413187
0
0
0
0
0
1
0.001618
false
0
0
0
0.003236
0
0
0
0
null
0
1
0
1
1
1
1
0
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
8a378ec84e9121971164512e16ad4dcda31f608a
11,933
py
Python
tests/core/test_task_operators.py
jamestwebber/prefect
410c4ac37d2595ab61007742883687f5e284821d
[ "Apache-2.0" ]
null
null
null
tests/core/test_task_operators.py
jamestwebber/prefect
410c4ac37d2595ab61007742883687f5e284821d
[ "Apache-2.0" ]
null
null
null
tests/core/test_task_operators.py
jamestwebber/prefect
410c4ac37d2595ab61007742883687f5e284821d
[ "Apache-2.0" ]
null
null
null
from prefect.core import Edge, Flow, Parameter, Task class TestMagicInteractionMethods: # ----------------------------------------- # getitem def test_getitem_list(self): with Flow(name="test") as f: z = Parameter("x")[Parameter("y")] state = f.run(parameters=dict(x=[1, 2, 3], y=1)) assert state.result[z].result == 2 def test_getitem_dict(self): with Flow(name="test") as f: z = Parameter("x")[Parameter("y")] state = f.run(parameters=dict(x=dict(a=1, b=2, c=3), y="b")) assert state.result[z].result == 2 def test_getitem_constant(self): with Flow(name="test") as f: z = Parameter("x")["b"] state = f.run(parameters=dict(x=dict(a=1, b=2, c=3))) assert state.result[z].result == 2 # ----------------------------------------- # or / pipe / | def test_or(self): with Flow(name="test") as f: t1 = Task() t2 = Task() t1 | t2 assert Edge(t1, t2) in f.edges def test_or_with_constant(self): with Flow(name="test") as f: t1 = Task() t1 | 1 assert len(f.tasks) == 2 assert len(f.edges) == 1 def test_ror_with_constant(self): with Flow(name="test") as f: t1 = Task() 1 | t1 assert len(f.tasks) == 2 assert len(f.edges) == 1 # ----------------------------------------- # Chain def test_chained_operators(self): with Flow(name="test") as f: t1 = Task("t1") t2 = Task("t2") t3 = Task("t3") t4 = Task("t4") t5 = Task("t5") t6 = Task("t6") (t1 | t2 | t3 | t4) assert all([e in f.edges for e in [Edge(t1, t2), Edge(t2, t3), Edge(t3, t4)]]) class TestMagicOperatorMethods: # ----------------------------------------- # addition def test_addition(self): with Flow(name="test") as f: z = Parameter("x") + Parameter("y") state = f.run(parameters=dict(x=1, y=2)) assert state.result[z].result == 3 def test_addition_with_constant(self): with Flow(name="test") as f: z = Parameter("x") + 10 state = f.run(parameters=dict(x=1)) assert state.result[z].result == 11 def test_right_addition(self): with Flow(name="test") as f: z = 10 + Parameter("x") state = f.run(parameters=dict(x=1)) assert state.result[z].result == 11 
# ----------------------------------------- # subtraction def test_subtraction(self): with Flow(name="test") as f: z = Parameter("x") - Parameter("y") state = f.run(parameters=dict(x=1, y=2)) assert state.result[z].result == -1 def test_subtraction_with_constant(self): with Flow(name="test") as f: z = Parameter("x") - 10 state = f.run(parameters=dict(x=1)) assert state.result[z].result == -9 def test_right_subtraction(self): with Flow(name="test") as f: z = 10 - Parameter("x") state = f.run(parameters=dict(x=1)) assert state.result[z].result == 9 # ----------------------------------------- # multiplication def test_multiplication(self): with Flow(name="test") as f: z = Parameter("x") * Parameter("y") state = f.run(parameters=dict(x=2, y=3)) assert state.result[z].result == 6 def test_multiplication_with_constant(self): with Flow(name="test") as f: z = Parameter("x") * 10 state = f.run(parameters=dict(x=2)) assert state.result[z].result == 20 def test_right_multiplication(self): with Flow(name="test") as f: z = 10 * Parameter("x") state = f.run(parameters=dict(x=2)) assert state.result[z].result == 20 # ----------------------------------------- # division def test_division(self): with Flow(name="test") as f: z = Parameter("x") / Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result == 2.5 def test_division_with_constant(self): with Flow(name="test") as f: z = Parameter("x") / 10 state = f.run(parameters=dict(x=35)) assert state.result[z].result == 3.5 def test_right_division(self): with Flow(name="test") as f: z = 10 / Parameter("x") state = f.run(parameters=dict(x=4)) assert state.result[z].result == 2.5 # ----------------------------------------- # floor division def test_floor_division(self): with Flow(name="test") as f: z = Parameter("x") // Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result == 2 def test_floor_division_with_constant(self): with Flow(name="test") as f: z = Parameter("x") // 10 
state = f.run(parameters=dict(x=38)) assert state.result[z].result == 3 def test_right_floor_division(self): with Flow(name="test") as f: z = 10 // Parameter("x") state = f.run(parameters=dict(x=4)) assert state.result[z].result == 2 # ----------------------------------------- # mod def test_mod(self): with Flow(name="test") as f: z = Parameter("x") % Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result == 1 def test_mod_with_constant(self): with Flow(name="test") as f: z = Parameter("x") % 10 state = f.run(parameters=dict(x=12)) assert state.result[z].result == 2 def test_right_mod(self): with Flow(name="test") as f: z = 10 % Parameter("x") state = f.run(parameters=dict(x=14)) assert state.result[z].result == 10 # ----------------------------------------- # pow def test_pow(self): with Flow(name="test") as f: z = Parameter("x") ** Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result == 25 def test_pow_with_constant(self): with Flow(name="test") as f: z = Parameter("x") ** 3 state = f.run(parameters=dict(x=2)) assert state.result[z].result == 8 def test_right_pow(self): with Flow(name="test") as f: z = 10 ** Parameter("x") state = f.run(parameters=dict(x=2)) assert state.result[z].result == 100 # ----------------------------------------- # gt def test_gt(self): with Flow(name="test") as f: z = Parameter("x") > Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result is True def test_gt_with_constant(self): with Flow(name="test") as f: z = Parameter("x") > 3 state = f.run(parameters=dict(x=2)) assert state.result[z].result is False def test_right_gt(self): with Flow(name="test") as f: z = 10 > Parameter("x") state = f.run(parameters=dict(x=10)) assert state.result[z].result is False # ----------------------------------------- # gte def test_gte(self): with Flow(name="test") as f: z = Parameter("x") >= Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert 
state.result[z].result is True def test_gte_with_constant(self): with Flow(name="test") as f: z = Parameter("x") >= 3 state = f.run(parameters=dict(x=2)) assert state.result[z].result is False def test_right_gte(self): with Flow(name="test") as f: z = 10 >= Parameter("x") state = f.run(parameters=dict(x=10)) assert state.result[z].result is True # ----------------------------------------- # lt def test_lt(self): with Flow(name="test") as f: z = Parameter("x") < Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result is False def test_lt_with_constant(self): with Flow(name="test") as f: z = Parameter("x") < 3 state = f.run(parameters=dict(x=2)) assert state.result[z].result is True def test_right_lt(self): with Flow(name="test") as f: z = 10 < Parameter("x") state = f.run(parameters=dict(x=10)) assert state.result[z].result is False # ----------------------------------------- # lte def test_lte(self): with Flow(name="test") as f: z = Parameter("x") <= Parameter("y") state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result is False def test_lte_with_constant(self): with Flow(name="test") as f: z = Parameter("x") <= 3 state = f.run(parameters=dict(x=2)) assert state.result[z].result is True def test_right_lte(self): with Flow(name="test") as f: z = 10 <= Parameter("x") state = f.run(parameters=dict(x=10)) assert state.result[z].result is True # ----------------------------------------- # and def test_and(self): with Flow(name="test") as f: z = Parameter("x") & Parameter("y") state = f.run(parameters=dict(x=True, y=False)) assert state.result[z].result is False state = f.run(parameters=dict(x=True, y=True)) assert state.result[z].result is True state = f.run(parameters=dict(x=False, y=True)) assert state.result[z].result is False state = f.run(parameters=dict(x=False, y=False)) assert state.result[z].result is False def test_and_with_constant(self): with Flow(name="test") as f: z = Parameter("x") & True state = 
f.run(parameters=dict(x=True)) assert state.result[z].result is True state = f.run(parameters=dict(x=False)) assert state.result[z].result is False with Flow(name="test") as f: z = Parameter("x") & False state = f.run(parameters=dict(x=True)) assert state.result[z].result is False state = f.run(parameters=dict(x=False)) assert state.result[z].result is False def test_right_and(self): with Flow(name="test") as f: z = True & Parameter("x") state = f.run(parameters=dict(x=True)) assert state.result[z].result is True state = f.run(parameters=dict(x=False)) assert state.result[z].result is False with Flow(name="test") as f: z = False & Parameter("x") state = f.run(parameters=dict(x=True)) assert state.result[z].result is False state = f.run(parameters=dict(x=False)) assert state.result[z].result is False class TestNonMagicOperatorMethods: def test_equals(self): with Flow(name="test") as f: z = Parameter("x").is_equal(Parameter("y")) state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result is False state = f.run(parameters=dict(x=5, y=5)) assert state.result[z].result is True def test_not_equals(self): with Flow(name="test") as f: z = Parameter("x").is_not_equal(Parameter("y")) state = f.run(parameters=dict(x=5, y=2)) assert state.result[z].result is True state = f.run(parameters=dict(x=5, y=5)) assert state.result[z].result is False def test_not(self): with Flow(name="test") as f: z = Parameter("x").not_() state = f.run(parameters=dict(x=True)) assert state.result[z].result is False state = f.run(parameters=dict(x=False)) assert state.result[z].result is True
32.16442
86
0.521663
1,620
11,933
3.782099
0.054938
0.052881
0.079321
0.167456
0.88314
0.879223
0.866819
0.865024
0.849682
0.804798
0
0.021382
0.286684
11,933
370
87
32.251351
0.698426
0.062264
0
0.52963
0
0
0.023837
0
0
0
0
0
0.222222
1
0.17037
false
0
0.003704
0
0.185185
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
8a4a087571b0887ecdc7dc345c82e277c9e1a3e0
95
py
Python
src/phonebot/vis/viewer/_pyqtgraph/__init__.py
vi-robotics/pyphonebot-extra
5db65d95fe1fafff2cbac7ca8dba66a71d363d6b
[ "MIT" ]
null
null
null
src/phonebot/vis/viewer/_pyqtgraph/__init__.py
vi-robotics/pyphonebot-extra
5db65d95fe1fafff2cbac7ca8dba66a71d363d6b
[ "MIT" ]
null
null
null
src/phonebot/vis/viewer/_pyqtgraph/__init__.py
vi-robotics/pyphonebot-extra
5db65d95fe1fafff2cbac7ca8dba66a71d363d6b
[ "MIT" ]
null
null
null
from .pyqtgraph_3d import * from .pyqtgraph_backend import * from .pyqtgraph_handlers import *
23.75
33
0.810526
12
95
6.166667
0.5
0.527027
0.513514
0
0
0
0
0
0
0
0
0.012048
0.126316
95
3
34
31.666667
0.879518
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
8a525552cc8ae2223489d522cfb0fb805182c2ec
9,859
py
Python
userbot/modules/file_summary.py
Danzo18/Man-Userbot
b8fdcae0951357406f670f67c9af4510b348f08b
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
userbot/modules/file_summary.py
Danzo18/Man-Userbot
b8fdcae0951357406f670f67c9af4510b348f08b
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
userbot/modules/file_summary.py
Danzo18/Man-Userbot
b8fdcae0951357406f670f67c9af4510b348f08b
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2022-01-26T13:02:09.000Z
2022-01-26T13:02:09.000Z
# Copyright (C) 2021 Catuserbot <https://github.com/sandy1709/catuserbot> # Ported by @mrismanaziz # FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot> # t.me/SharingUserbot & t.me/Lunatic0de import time from prettytable import PrettyTable from userbot import CMD_HELP from userbot.events import register from userbot.utils import _format, edit_delete, edit_or_reply, humanbytes, media_type TYPES = [ "Photo", "Audio", "Video", "Document", "Sticker", "Gif", "Voice", "Round Video", ] def weird_division(n, d): return n / d if d else 0 @register(outgoing=True, pattern=r"^\.chatfs(?: |$)(.*)") async def _(event): # sourcery no-metrics "Shows you the complete media/file summary of the that group" entity = event.chat_id input_str = event.pattern_match.group(1) if input_str: try: entity = int(input_str) except ValueError: entity = input_str starttime = int(time.monotonic()) x = PrettyTable() totalcount = totalsize = msg_count = 0 x.title = "File Summary" x.field_names = ["Media", "Count", "File size"] largest = " <b>Largest Size</b>\n" try: chatdata = await event.client.get_entity(entity) except Exception as e: return await edit_delete( event, f"<b>Error : </b><code>{e}</code>", time=5, parse_mode="HTML", ) if type(chatdata).__name__ == "Channel": if chatdata.username: link = f"<a href='t.me/{chatdata.username}'>{chatdata.title}</a>" else: link = chatdata.title else: link = f"<a href='tg://user?id={chatdata.id}'>{chatdata.first_name}</a>" event = await edit_or_reply( event, f"<b>Menghitung ukuran File dari group </b><code>{link}</code>\n<b>Harap Tunggu Ini mungkin memakan waktu yang lama tergantung pada jumlah pesan grup</b>", parse_mode="HTML", ) media_dict = { m: {"file_size": 0, "count": 0, "max_size": 0, "max_file_link": ""} for m in TYPES } async for message in event.client.iter_messages(entity=entity, limit=None): msg_count += 1 media = media_type(message) if media is not None: media_dict[media]["file_size"] += message.file.size media_dict[media]["count"] += 
1 if message.file.size > media_dict[media]["max_size"]: media_dict[media]["max_size"] = message.file.size if type(chatdata).__name__ == "Channel": media_dict[media][ "max_file_link" ] = f"https://t.me/c/{chatdata.id}/{message.id}" # pylint: disable=line-too-long else: media_dict[media][ "max_file_link" ] = f"tg://openmessage?user_id={chatdata.id}&message_id={message.id}" # pylint: disable=line-too-long totalsize += message.file.size totalcount += 1 for mediax in TYPES: x.add_row( [ mediax, media_dict[mediax]["count"], humanbytes(media_dict[mediax]["file_size"]), ] ) if media_dict[mediax]["count"] != 0: largest += f" • <b><a href='{media_dict[mediax]['max_file_link']}'>{mediax}</a> : </b><code>{humanbytes(media_dict[mediax]['max_size'])}</code>\n" endtime = int(time.monotonic()) if endtime - starttime >= 120: runtime = str(round(((endtime - starttime) / 60), 2)) + " minutes" else: runtime = str(endtime - starttime) + " seconds" avghubytes = humanbytes(weird_division(totalsize, totalcount)) avgruntime = ( str(round((weird_division((endtime - starttime), totalcount)) * 1000, 2)) + " ms" ) totalstring = f"<b>Total Files :</b> <code>{totalcount}</code>\n<b>Total File Size :</b> <code>{humanbytes(totalsize)}</code>\n<b>Avg. File Size :</b> <code>{avghubytes}</code>\n" runtimestring = f"<b>Runtime :</b> <code>{runtime}</code>\ \n<b>Runtime per file :</b> <code>{avgruntime}</code>\ \n" line = "<b>━━━━━━━━━━━━━━━━━━━━</b>\n" result = f"<b>Group : {link}</b>\n\n" result += f"<b>Total Messages:</b><code> {msg_count}</code>\n" result += "<b>File Summary : </b>\n" result += f"<code>{x}</code>\n" result += f"{largest}" result += line + totalstring + line + runtimestring + line await event.edit(result, parse_mode="HTML", link_preview=False) @register(outgoing=True, pattern=r"^\.userfs(?: |$)(.*)") async def _(event): # sourcery no-metrics "Shows you the complete media/file summary of the that user in that group." 
reply = await event.get_reply_message() input_str = event.pattern_match.group(1) if reply and input_str: try: entity = int(input_str) except ValueError: entity = input_str userentity = reply.sender_id elif reply: entity = event.chat_id userentity = reply.sender_id elif input_str: entity = event.chat_id try: userentity = int(input_str) except ValueError: userentity = input_str else: entity = event.chat_id userentity = event.sender_id starttime = int(time.monotonic()) x = PrettyTable() totalcount = totalsize = msg_count = 0 x.title = "File Summary" x.field_names = ["Media", "Count", "File size"] largest = " <b>Largest Size</b>\n" try: chatdata = await event.client.get_entity(entity) except Exception as e: return await edit_delete( event, f"<b>Error : </b><code>{e}</code>", 5, parse_mode="HTML" ) try: userdata = await event.client.get_entity(userentity) except Exception as e: return await edit_delete( event, f"<b>Error : </b><code>{e}</code>", time=5, parse_mode="HTML", ) if type(chatdata).__name__ == "Channel": if chatdata.username: link = f"<a href='t.me/{chatdata.username}'>{chatdata.title}</a>" else: link = chatdata.title else: link = f"<a href='tg://user?id={chatdata.id}'>{chatdata.first_name}</a>" event = await edit_or_reply( event, f"<b>Menghitung ukuran File yang dikirim </b>{_format.htmlmentionuser(userdata.first_name,userdata.id)}<b> di Grup </b><code>{link}</code>\n<b>Harap Tunggu Ini mungkin memakan waktu yang lama tergantung pada jumlah pesan grup</b>", parse_mode="HTML", ) media_dict = { m: {"file_size": 0, "count": 0, "max_size": 0, "max_file_link": ""} for m in TYPES } async for message in event.client.iter_messages( entity=entity, limit=None, from_user=userentity ): msg_count += 1 media = media_type(message) if media is not None: media_dict[media]["file_size"] += message.file.size media_dict[media]["count"] += 1 if message.file.size > media_dict[media]["max_size"]: media_dict[media]["max_size"] = message.file.size if type(chatdata).__name__ == 
"Channel": media_dict[media][ "max_file_link" ] = f"https://t.me/c/{chatdata.id}/{message.id}" else: media_dict[media][ "max_file_link" ] = f"tg://openmessage?user_id={chatdata.id}&message_id={message.id}" totalsize += message.file.size totalcount += 1 for mediax in TYPES: x.add_row( [ mediax, media_dict[mediax]["count"], humanbytes(media_dict[mediax]["file_size"]), ] ) if media_dict[mediax]["count"] != 0: largest += f" • <b><a href='{media_dict[mediax]['max_file_link']}'>{mediax}</a> : </b><code>{humanbytes(media_dict[mediax]['max_size'])}</code>\n" endtime = int(time.monotonic()) if endtime - starttime >= 120: runtime = str(round(((endtime - starttime) / 60), 2)) + " minutes" else: runtime = str(endtime - starttime) + " seconds" avghubytes = humanbytes(weird_division(totalsize, totalcount)) avgruntime = ( str(round((weird_division((endtime - starttime), totalcount)) * 1000, 2)) + " ms" ) totalstring = f"<b>Total Files :</b> <code>{totalcount}</code>\n<b>Total File Size :</b> <code>{humanbytes(totalsize)}</code>\n<b>Avg. 
File Size :</b> <code>{avghubytes}\\\x1f \n</code>" runtimestring = f"<b>Runtime :</b> <code>{runtime}</code>\ \n<b>Runtime Per File :</b> <code>{avgruntime}</code>\ \n" line = "<b>━━━━━━━━━━━━━━━━━━━━</b>\n" result = f"<b>Group : {link}\nUser : {_format.htmlmentionuser(userdata.first_name,userdata.id)}</b>\n\n" result += f"<b>Total Messages:</b> <code>{msg_count}</code>\n" result += "<b>File Summary : </b>\n" result += f"<code>{x}</code>\n" result += f"{largest}" result += line + totalstring + line + runtimestring + line await event.edit(result, parse_mode="HTML", link_preview=False) CMD_HELP.update( { "file-summary": "**Plugin : **`file-summery`\ \n\n • **Syntax :** `.chatfs` <username/id>\ \n • **Function : **Untuk Menampilkan ringkasan media/file lengkap dari grup itu\ \n\n • **Syntax :** `.userfs` <reply/username/id>\ \n • **Function : **Untuk Menampilkan ringkasan media/file lengkap dari anggota group tersebut.\ \n\n • **NOTE :** Untuk sekarang terbatas pada 10.000 terakhir di grup yang Anda gunakan\ " } )
38.814961
239
0.567197
1,214
9,859
4.521417
0.171334
0.039351
0.030607
0.024777
0.847149
0.809619
0.809619
0.809619
0.771179
0.771179
0
0.008273
0.2766
9,859
253
240
38.968379
0.754767
0.02982
0
0.701299
0
0.034632
0.26622
0.112809
0
0
0
0
0
1
0.004329
false
0
0.021645
0.004329
0.04329
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
8a76a764e4aa6e39061c52c6432b72fa4dce0929
6,814
py
Python
regress/PORT_ME_TESTS/tests-qos.py
fp7-ofelia/VeRTIGO
11f39f819196c8352611852435dea17bc6a2292f
[ "BSD-3-Clause" ]
2
2016-10-12T08:20:00.000Z
2017-05-09T13:13:18.000Z
regress/PORT_ME_TESTS/tests-qos.py
fp7-ofelia/VeRTIGO
11f39f819196c8352611852435dea17bc6a2292f
[ "BSD-3-Clause" ]
null
null
null
regress/PORT_ME_TESTS/tests-qos.py
fp7-ofelia/VeRTIGO
11f39f819196c8352611852435dea17bc6a2292f
[ "BSD-3-Clause" ]
1
2020-10-01T07:57:34.000Z
2020-10-01T07:57:34.000Z
#!/usr/bin/python from fvregress import * import string # really? you have to do this? if len(sys.argv) > 1 : wantPause = True timeout=9999999 valgrindArgs= [] else: wantPause = False timeout=5 valgrindArgs= None # start up a flowvisor with 1 switch (default) and two guests # out of the flowvisor-conf.d-mobility config dir #h= HyperTest(guests=[('localhost',54321),('localhost',54322), ('localhost',54323)], # hyperargs=['-v0',"-a", "flowvisor-conf.d-qos","ptcp:%d" % HyperTest.OFPORT ],valgrind=valgrindArgs) h = FvRegress.parseConfig(configDir='flowvisor-conf.d-qos', valgrind=valgrindArgs) if wantPause: doPause("start tests") #################################### Start Tests try: feature_request = FvRegress.OFVERSION + '05 0008 2d47 c5eb' feature_request_after = FvRegress.OFVERSION + '05 0008 0001 0000' h.runTest(name="feature_request",timeout=timeout, events= [ TestEvent( "send","guest","alice", feature_request), TestEvent( "recv","switch","switch1", feature_request_after), ]) ######################################## udp = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff ffff 0008 0000 0008 0001 0080 0123 2000 0001 0000 0000 0000 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' udp_and_vlan = FvRegress.OFVERSION + '''0d 00 60 01 01 00 00 ff ff ff ff ff ff 00 10 00 01 00 08 00 0f 00 00 00 00 00 08 00 01 00 80 01 23 20 ff 00 01 00 00 00 00 00 00 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' udp_and_pcp = FvRegress.OFVERSION + '''0d 00 60 02 01 00 00 ff ff ff ff ff ff 00 10 00 02 00 08 03 00 00 00 00 00 00 08 00 01 00 80 01 23 20 ff 00 02 00 00 00 00 00 00 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' udp_and_both = FvRegress.OFVERSION + '''0d 00 68 03 01 00 00 
ff ff ff ff ff ff 00 18 00 01 00 08 0f a0 00 00 00 02 00 08 05 00 00 00 00 00 00 08 00 01 00 80 01 23 20 ff 00 03 00 00 00 00 00 00 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' udp_and_slicing = FvRegress.OFVERSION + '''0d 00 60 09 01 00 00 ff ff ff ff ff ff 00 10 00 0b 00 10 00 01 00 00 00 00 00 00 00 00 00 06 01 23 20 ff 00 04 00 00 00 00 00 00 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50 ''' udp_and_both_and_slicing = FvRegress.OFVERSION + '''0d 00 70 0a 01 00 00 ff ff ff ff ff ff 00 20 00 01 00 08 0f a0 00 00 00 02 00 08 05 00 00 00 00 0b 00 10 00 01 00 00 00 00 00 00 00 00 00 07 01 23 20 ff 00 05 00 00 00 00 00 00 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50 ''' h.runTest(name="packet_out qos re-write",timeout=timeout, events= [ TestEvent( "send","guest","alice", udp), TestEvent( "recv","switch","switch1", udp_and_vlan), TestEvent( "send","guest","bob", udp), TestEvent( "recv","switch","switch1", udp_and_pcp), TestEvent( "send","guest","cathy", udp), TestEvent( "recv","switch","switch1", udp_and_both), TestEvent( "send","guest","doug", udp), TestEvent( "recv","switch","switch1", udp_and_slicing), TestEvent( "send","guest","erik", udp), TestEvent( "recv","switch","switch1", udp_and_both_and_slicing), ]) ###################################### flow_mod = FvRegress.OFVERSION + '''0e 00 50 01 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 00 00 08 00 01 00 00''' flow_mod_and_vlan = FvRegress.OFVERSION + '''0e 00 58 01 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 
ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 01 00 08 00 0f 00 00 00 00 00 08 00 01 00 00''' flow_mod_and_pcp = FvRegress.OFVERSION + '''0e 00 58 05 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 02 00 08 03 00 00 00 00 00 00 08 00 01 00 00 ''' flow_mod_and_both= FvRegress.OFVERSION + '''0e 00 60 06 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 01 00 08 0f a0 00 00 00 02 00 08 05 00 00 00 00 00 00 08 00 01 00 00 ''' flow_mod_and_slicing = FvRegress.OFVERSION + '''0e 00 58 09 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 0b 00 10 00 01 00 00 00 00 00 00 00 00 00 06 ''' flow_mod_and_both_and_slicing = FvRegress.OFVERSION + '''0e 00 68 0a 01 00 00 00 00 00 00 00 02 00 10 18 07 67 87 00 0d b9 15 c0 44 ff ff 08 00 11 00 c0 a8 02 fe c0 a8 02 02 00 43 00 44 00 00 00 05 00 00 00 00 00 00 00 00 00 00 80 00 00 17 70 97 40 6f 98 02 00 00 00 00 00 01 00 08 0f a0 00 00 00 02 00 08 05 00 00 00 00 0b 00 10 00 01 00 00 00 00 00 00 00 00 00 07 ''' h.runTest(name="flow_mod qos re-write",timeout=timeout, events= [ TestEvent( "send","guest","alice", flow_mod), TestEvent( "recv","switch","switch1", flow_mod_and_vlan), TestEvent( "send","guest","bob", flow_mod), TestEvent( "recv","switch","switch1", flow_mod_and_pcp), TestEvent( "send","guest","cathy", flow_mod), TestEvent( "recv","switch","switch1", flow_mod_and_both), TestEvent( "send","guest","doug", flow_mod), TestEvent( "recv","switch","switch1", flow_mod_and_slicing), 
TestEvent( "send","guest","erik", flow_mod), TestEvent( "recv","switch","switch1", flow_mod_and_both_and_slicing), ]) ######################################### # more tests for this setup HERE #################################### End Tests finally: if wantPause: doPause("start cleanup") h.cleanup()
45.125828
102
0.623569
1,448
6,814
2.883978
0.134669
0.250958
0.268678
0.262452
0.735393
0.704741
0.620929
0.58501
0.565374
0.533525
0
0.391024
0.270766
6,814
150
103
45.426667
0.449386
0.057382
0
0.354331
0
0.015748
0.671433
0
0
1
0
0
0
0
null
null
0
0.015748
null
null
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
9
8abe31de9e332aa83a5826923a5a0a710e39b6d7
68,389
py
Python
test/python/test_operation.py
Wentong-DST/incubator-singa
0d1eaaac549e574d75a496eee3037ba91fc8f6b9
[ "Apache-2.0" ]
1
2019-11-15T12:46:10.000Z
2019-11-15T12:46:10.000Z
test/python/test_operation.py
laojizi/singa
58e346eb1188faf78497ae2c8e129c99de3d743d
[ "Apache-2.0" ]
null
null
null
test/python/test_operation.py
laojizi/singa
58e346eb1188faf78497ae2c8e129c99de3d743d
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import unittest from builtins import str from singa import tensor from singa import singa_wrap as singa from singa import autograd from singa import singa_wrap from cuda_helper import gpu_dev, cpu_dev import numpy as np autograd.training = True CTensor = singa.Tensor dy = CTensor([2, 1, 2, 2]) singa.Gaussian(0.0, 1.0, dy) def _tuple_to_string(t): lt = [str(x) for x in t] return '(' + ', '.join(lt) + ')' def prepare_inputs_targets_for_rnn_test(): x_0 = np.random.random((2, 3)).astype(np.float32) x_1 = np.random.random((2, 3)).astype(np.float32) x_2 = np.random.random((2, 3)).astype(np.float32) h_0 = np.zeros((2, 2)).astype( np.float32) t_0 = np.random.random((2, 2)).astype(np.float32) t_1 = np.random.random((2, 2)).astype(np.float32) t_2 = np.random.random((2, 2)).astype(np.float32) x0 = tensor.Tensor(device=gpu_dev, data=x_0) x1 = tensor.Tensor(device=gpu_dev, data=x_1) x2 = tensor.Tensor(device=gpu_dev, data=x_2) h0 = tensor.Tensor(device=gpu_dev, data=h_0) t0 = tensor.Tensor(device=gpu_dev, data=t_0) t1 = tensor.Tensor(device=gpu_dev, data=t_1) t2 = tensor.Tensor(device=gpu_dev, data=t_2) inputs = [x0, x1, x2] targets = 
[t0, t1, t2] return inputs, targets, h0 class TestPythonOperation(unittest.TestCase): def check_shape(self, actual, expect): self.assertEqual(actual, expect, 'shape mismatch, actual shape is %s' ' exepcted is %s' % (_tuple_to_string(actual), _tuple_to_string(expect)) ) def test_Greater_cpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.greater(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd.greater(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_Greater_gpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.greater(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd.greater(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_conv2d_cpu(self): # (in_channels, out_channels, kernel_size) conv_0 = autograd.Conv2d(3, 1, 2) conv_without_bias_0 = autograd.Conv2d(3, 1, 2, bias=False) cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev) cpu_input_tensor.gaussian(0.0, 1.0) dy = tensor.Tensor(shape=(2, 1, 2, 2), device=cpu_dev) dy.gaussian(0.0, 1.0) y = conv_0(cpu_input_tensor) # PyTensor dx, dW, db = y.creator.backward(dy.data) # CTensor self.check_shape(y.shape, (2, 1, 2, 2)) self.check_shape(dx.shape(), (2, 3, 3, 3)) self.check_shape(dW.shape(), (1, 3, 2, 2)) self.check_shape(db.shape(), (1,)) # forward without bias y_without_bias = conv_without_bias_0(cpu_input_tensor) self.check_shape(y_without_bias.shape, (2, 1, 2, 2)) def test_conv2d_gpu(self): # (in_channels, out_channels, kernel_size) conv_0 = autograd.Conv2d(3, 1, 2) conv_without_bias_0 = autograd.Conv2d(3, 1, 2, bias=False) 
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev) gpu_input_tensor.gaussian(0.0, 1.0) dy = tensor.Tensor(shape=(2, 1, 2, 2), device=gpu_dev) dy.gaussian(0.0, 1.0) y = conv_0(gpu_input_tensor) # PyTensor dx, dW, db = y.creator.backward(dy.data) # CTensor self.check_shape(y.shape, (2, 1, 2, 2)) self.check_shape(dx.shape(), (2, 3, 3, 3)) self.check_shape(dW.shape(), (1, 3, 2, 2)) self.check_shape(db.shape(), (1,)) # forward without bias y_without_bias = conv_without_bias_0(gpu_input_tensor) self.check_shape(y_without_bias.shape, (2, 1, 2, 2)) def test_sum_cpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) x1 = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) y = x+x1 dy = np.ones((3, 2), dtype = np.float32) grad0=dy grad1=dy x = tensor.from_numpy(x) x1 = tensor.from_numpy(x1) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.sum(x,x1) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5) def test_sum_gpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) x1 = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) y = x+x1 dy = np.ones((3, 2), dtype = np.float32) grad0=dy grad1=dy x = tensor.from_numpy(x) x1 = tensor.from_numpy(x1) dy = tensor.from_numpy(dy) x.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sum(x,x1) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5) 
np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5) def test_conv2d_cpu(self): # (in_channels, out_channels, kernel_size) conv_1 = autograd.Conv2d(3, 1, 2) conv_without_bias_1 = autograd.Conv2d(3, 1, 2, bias=False) cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev) cpu_input_tensor.gaussian(0.0, 1.0) y = conv_1(cpu_input_tensor) # PyTensor dx, dW, db = y.creator.backward(dy) # CTensor self.check_shape(y.shape, (2, 1, 2, 2)) self.check_shape(dx.shape(), (2, 3, 3, 3)) self.check_shape(dW.shape(), (1, 3, 2, 2)) self.check_shape(db.shape(), (1,)) # forward without bias y_without_bias = conv_without_bias_1(cpu_input_tensor) self.check_shape(y_without_bias.shape, (2, 1, 2, 2)) def test_SeparableConv2d_gpu(self): # SeparableConv2d(in_channels, out_channels, kernel_size) separ_conv=autograd.SeparableConv2d(8, 16, 3, padding=1) x=np.random.random((10,8,28,28)).astype(np.float32) x=tensor.Tensor(device=gpu_dev, data=x) y1 = separ_conv.depthwise_conv(x) y2 = separ_conv.point_conv(y1) dy1, dW_depth = y2.creator.backward(y2.data) dx, dW_spacial = y1.creator.backward(dy1) self.check_shape(y2.shape, (10, 16, 28, 28)) self.check_shape(dy1.shape(), (10, 8, 28, 28)) self.check_shape(dW_depth.shape(), (16, 8, 1, 1)) self.check_shape(dx.shape(), (10, 8, 28, 28)) self.check_shape(dW_spacial.shape(), (8, 1, 3, 3)) y = separ_conv(x) self.check_shape(y.shape, (10, 16, 28, 28)) def test_batchnorm2d_cpu(self): batchnorm_0 = autograd.BatchNorm2d(3) cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev) cpu_input_tensor.gaussian(0.0, 1.0) dy = cpu_input_tensor.clone().data y = batchnorm_0(cpu_input_tensor) dx, ds, db = y.creator.backward(dy) self.check_shape(y.shape, (2, 3, 3, 3)) self.check_shape(dx.shape(), (2, 3, 3, 3)) self.check_shape(ds.shape(), (3,)) self.check_shape(db.shape(), (3,)) def test_batchnorm2d_gpu(self): batchnorm_0 = autograd.BatchNorm2d(3) gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), 
device=gpu_dev) gpu_input_tensor.gaussian(0.0, 1.0) dy = gpu_input_tensor.clone().data y = batchnorm_0(gpu_input_tensor) dx, ds, db = y.creator.backward(dy) self.check_shape(y.shape, (2, 3, 3, 3)) self.check_shape(dx.shape(), (2, 3, 3, 3)) self.check_shape(ds.shape(), (3,)) self.check_shape(db.shape(), (3,)) def test_vanillaRNN_gpu_tiny_ops_shape_check(self): # gradients shape check. inputs, target, h0 = prepare_inputs_targets_for_rnn_test() rnn = autograd.RNN(3, 2) hs, _ = rnn(inputs, h0) loss = autograd.softmax_cross_entropy(hs[0], target[0]) for i in range(1, len(hs)): l = autograd.softmax_cross_entropy(hs[i], target[i]) loss = autograd.add(loss, l) # d=autograd.infer_dependency(loss.creator) # print(d) for t, dt in autograd.backward(loss): self.check_shape(t.shape, dt.shape) def test_LSTM_gpu_tiny_ops_shape_check(self): # gradients shape check. inputs, target, h0 = prepare_inputs_targets_for_rnn_test() c_0 = np.random.random((2, 1)).astype(np.float32) c0 = tensor.Tensor(device=gpu_dev, data=c_0) rnn = autograd.LSTM(3, 2) hs, _, _ = rnn(inputs, (h0, c0)) loss = autograd.softmax_cross_entropy(hs[0], target[0]) for i in range(1, len(hs)): l = autograd.softmax_cross_entropy(hs[i], target[i]) loss = autograd.add(loss, l) # d=autograd.infer_dependency(loss.creator) # print(d) for t, dt in autograd.backward(loss): self.check_shape(t.shape, dt.shape) def gradients_check(self, func, param, autograds, h=0.0005, df=1): # param: PyTensor # autograds: numpy_tensor p = tensor.to_numpy(param) it = np.nditer(p, flags=['multi_index'], op_flags=['readwrite']) while not it.finished: idx = it.multi_index diff = np.zeros_like(p) diff[idx] += h diff = tensor.from_numpy(diff) diff.to_device(gpu_dev) param += diff pos = func() pos = tensor.to_numpy(pos) param -= diff param -= diff neg = func() neg = tensor.to_numpy(neg) numerical_grad = np.sum((pos - neg) * df) / (2 * h) #print((autograds[idx] - numerical_grad)/numerical_grad) # threshold set as -5% to +5% 
#self.assertAlmostEqual((autograds[idx] - numerical_grad)/(numerical_grad+0.0000001), 0., places=1) self.assertAlmostEqual( autograds[idx] - numerical_grad, 0., places=2) it.iternext() def test_numerical_gradients_check_for_vallina_rnn(self): inputs, target, h0 = prepare_inputs_targets_for_rnn_test() rnn = autograd.RNN(3, 2) def valinna_rnn_forward(): hs, _ = rnn(inputs, h0) loss = autograd.softmax_cross_entropy(hs[0], target[0]) for i in range(1, len(hs)): l = autograd.softmax_cross_entropy(hs[i], target[i]) loss = autograd.add(loss, l) #grads = autograd.gradients(loss) return loss loss1 = valinna_rnn_forward() auto_grads = autograd.gradients(loss1) for param in rnn.params: auto_grad = tensor.to_numpy(auto_grads[param]) self.gradients_check(valinna_rnn_forward, param, auto_grad) def test_numerical_gradients_check_for_lstm(self): inputs, target, h0 = prepare_inputs_targets_for_rnn_test() c_0 = np.zeros((2, 2)).astype(np.float32) c0 = tensor.Tensor(device=gpu_dev, data=c_0) rnn = autograd.LSTM(3, 2) def lstm_forward(): hs, _, _ = rnn(inputs, (h0, c0)) loss = autograd.softmax_cross_entropy(hs[0], target[0]) for i in range(1, len(hs)): l = autograd.softmax_cross_entropy(hs[i], target[i]) loss = autograd.add(loss, l) return loss loss1 = lstm_forward() auto_grads = autograd.gradients(loss1) for param in rnn.params: auto_grad = tensor.to_numpy(auto_grads[param]) self.gradients_check(lstm_forward, param, auto_grad) def test_MeanSquareError(self): X=np.array([4.3,5.4,3.3,3.6,5.7,6.0]).reshape(3,2).astype(np.float32) T=np.array([4.4,5.3,3.2,3.7,5.4,6.3]).reshape(3,2).astype(np.float32) x=tensor.from_numpy(X) t=tensor.from_numpy(T) x.to_device(gpu_dev) t.to_device(gpu_dev) loss= autograd.mse_loss(x,t) dx=loss.creator.backward()[0] loss_np=tensor.to_numpy(loss)[0] self.assertAlmostEqual(loss_np, 0.0366666, places=4) self.check_shape(dx.shape(), (3, 2)) def test_Abs(self): X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32) 
XT=np.array([0.8,1.2,3.3,3.6,0.5,0.5]).reshape(3,2).astype(np.float32) x=tensor.from_numpy(X) x.to_device(gpu_dev) result=autograd.abs(x) dx=result.creator.backward(x.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT) self.check_shape(dx.shape(), (3, 2)) def test_Mean_gpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = (x0+x1)/2 grad=np.ones(x0.shape)/2 x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd.mean(x0,x1) dy = tensor.from_numpy(np.ones((3,2)).astype(np.float32)) dy.to_device(gpu_dev) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad, decimal=5) def test_Mean_cpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = (x0+x1)/2 grad=np.ones(x0.shape)/2 x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd.mean(x0,x1) dy = tensor.from_numpy(np.ones((3,2)).astype(np.float32)) dy.to_device(cpu_dev) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad, decimal=5) def test_Exp(self): X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32) XT=np.exp(X) x=tensor.from_numpy(X) x.to_device(gpu_dev) result=autograd.exp(x) dx=result.creator.backward(x.data) 
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) self.check_shape(dx.shape(), (3, 2)) def test_Identity_cpu(self): x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) y = x.copy() grad=np.ones(x.shape) x = tensor.from_numpy(x) x.to_device(cpu_dev) result = autograd.identity(x) dy = tensor.from_numpy(np.ones((3,2)).astype(np.float32)) dy.to_device(cpu_dev) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) self.check_shape(dx.shape(), (3, 2)) def test_Identity_gpu(self): x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) y = x.copy() grad=np.ones(x.shape) x = tensor.from_numpy(x) x.to_device(gpu_dev) result = autograd.identity(x) dy = tensor.from_numpy(np.ones((3,2)).astype(np.float32)) dy.to_device(gpu_dev) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) self.check_shape(dx.shape(), (3, 2)) def test_LeakyRelu(self): X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32) XT=np.array([0.8,-0.012,3.3,-0.036,-0.005,0.5]).reshape(3,2).astype(np.float32) x=tensor.from_numpy(X) x.to_device(gpu_dev) result=autograd.leakyrelu(x) dx=result.creator.backward(x.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT) self.check_shape(dx.shape(), (3, 2)) def test_Cos_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.cos(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.cos(x) dx = result.creator.backward(dy.data) G = - np.sin(X) DX = np.multiply(G, DY) 
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Cos_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.cos(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.cos(x) dx = result.creator.backward(dy.data) G = - np.sin(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Cosh_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.cosh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.cosh(x) dx = result.creator.backward(dy.data) G = np.sinh(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Cosh_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.cosh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.cosh(x) dx = result.creator.backward(dy.data) G = np.sinh(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Acos_cpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arccos(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) 
x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.acos(x) dx = result.creator.backward(dy.data) G = - 1.0 / np.sqrt( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Acos_gpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arccos(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.acos(x) dx = result.creator.backward(dy.data) G = - 1.0 / np.sqrt( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Acosh_cpu(self): X = np.array([1.1, 1.5, 1.9, 2.2, 2.5, 2.8]).reshape(3, 2).astype(np.float32) XT = np.arccosh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.acosh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.multiply( np.sqrt( X - 1.0 ) , np.sqrt( X + 1.0 ) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Acosh_gpu(self): X = np.array([1.1, 1.5, 1.9, 2.2, 2.5, 2.8]).reshape(3, 2).astype(np.float32) XT = np.arccosh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.acosh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.multiply( np.sqrt( X - 1.0 ) , np.sqrt( X + 1.0 ) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) 
np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sin_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sin(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.sin(x) dx = result.creator.backward(dy.data) G = np.cos(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sin_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sin(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sin(x) dx = result.creator.backward(dy.data) G = np.cos(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sinh_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sinh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.sinh(x) dx = result.creator.backward(dy.data) G = np.cosh(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sinh_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sinh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sinh(x) dx = 
result.creator.backward(dy.data) G = np.cosh(X) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Asin_cpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arcsin(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.asin(x) dx = result.creator.backward(dy.data) G = 1.0 / np.sqrt( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Asin_gpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arcsin(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.asin(x) dx = result.creator.backward(dy.data) G = 1.0 / np.sqrt( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Asinh_cpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arcsinh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.asinh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.sqrt( np.square(X) + 1.0 ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Less_gpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 
0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.less(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd.less(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_Less_cpu(self): x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.less(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd.less(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_Asinh_gpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arcsinh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.asinh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.sqrt( np.square(X) + 1.0 ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Tan_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.tan(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.tan(x) dx = result.creator.backward(dy.data) G = 1.0 / np.square( np.cos(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Tan_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.tan(X) DY = np.ones((3, 
2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.tan(x) dx = result.creator.backward(dy.data) G = 1.0 / np.square( np.cos(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Tanh_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.tanh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.tanh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.square( np.cosh(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Tanh_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.tanh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.tanh(x) dx = result.creator.backward(dy.data) G = 1.0 / np.square( np.cosh(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Atan_cpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arctan(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.atan(x) dx = result.creator.backward(dy.data) G = 1.0 / ( 1.0 + np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) 
np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Atan_gpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arctan(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.atan(x) dx = result.creator.backward(dy.data) G = 1.0 / ( 1.0 + np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Atanh_cpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arctanh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.atanh(x) dx = result.creator.backward(dy.data) G = 1.0 / ( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Atanh_gpu(self): X = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) XT = np.arctanh(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.atanh(x) dx = result.creator.backward(dy.data) G = 1.0 / ( 1.0 - np.square(X) ) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sub_cpu(self): X0 = np.array([7, -5, 0.2, -0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([0.6, -1.3, 0.1, -0.1, 0.4, 0.3]).reshape(3, 2).astype(np.float32) XT = np.subtract(X0, X1) DY = np.ones((3, 2), dtype 
= np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.sub(x0, x1) dx0, dx1 = result.creator.backward(dy.data) DX0 = np.multiply(DY, 1.0) DX1 = np.multiply(DY, -1.0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_Sub_gpu(self): X0 = np.array([7, -5, 0.2, -0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([0.6, -1.3, 0.1, -0.1, 0.4, 0.3]).reshape(3, 2).astype(np.float32) XT = np.subtract(X0, X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sub(x0, x1) dx0, dx1 = result.creator.backward(dy.data) DX0 = np.multiply(DY, 1.0) DX1 = np.multiply(DY, -1.0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_Pow_cpu(self): X0 = np.array([7, 5, 0.2, 0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([-1.0, 2.0, -1.0, -2.1, 1.0, -2.0]).reshape(3, 2).astype(np.float32) XT = np.power(X0, X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.pow(x0, x1) dx0, dx1 = result.creator.backward(dy.data) G0 = np.multiply(X1, np.power(X0, (X1 - 1.0)) ) DX0 = np.multiply(G0, DY) G1 = np.multiply(np.power(X0, X1), np.log(X0) ) DX1 = np.multiply(G1, DY) 
np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=4) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=4) def test_Pow_gpu(self): X0 = np.array([7, 5, 0.2, 0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([-1.0, 2.0, -1.0, -2.1, 1.0, -2.0]).reshape(3, 2).astype(np.float32) XT = np.power(X0, X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.pow(x0, x1) dx0, dx1 = result.creator.backward(dy.data) G0 = np.multiply(X1, np.power(X0, (X1 - 1.0)) ) DX0 = np.multiply(G0, DY) G1 = np.multiply(np.power(X0, X1), np.log(X0) ) DX1 = np.multiply(G1, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=4) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=4) def test_SoftSign_cpu(self): # y = x / (1 + np.abs(x)) X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = X/(1 + np.absolute(X)) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.softsign(x) dx = result.creator.backward(dy.data) G = 1.0/np.square(np.absolute(X)+1.0) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_SoftSign_gpu(self): # y = x / (1 + np.abs(x)) X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = X/(1 + np.absolute(X)) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy 
= tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.softsign(x) dx = result.creator.backward(dy.data) G = 1.0/np.square(np.absolute(X)+1.0) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_SoftPlus_cpu(self): #y = np.log(np.exp(x) + 1) X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.log(np.exp(X) + 1) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.softplus(x) dx = result.creator.backward(dy.data) G = 1.0 / (1.0 + np.exp(-X)) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_SoftPlus_gpu(self): #y = np.log(np.exp(x) + 1) X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.log(np.exp(X) + 1) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.softplus(x) dx = result.creator.backward(dy.data) G = 1.0 / (1.0 + np.exp(-X)) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_unsqueeze_cpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(1,2,3).astype(np.float32) y = x.reshape(1, 1, 2, 3, 1) dy = np.ones((1, 1, 2, 3, 1), dtype = np.float32) grad = dy.reshape(1,2,3) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.unsqueeze(x,[0, 4]) dx = result.creator.backward(dy.data) 
np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_unsqueeze_gpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(1,2,3).astype(np.float32) y = x.reshape(1, 1, 2, 3, 1) dy = np.ones((1, 1, 2, 3, 1), dtype = np.float32) grad = dy.reshape(1,2,3) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.unsqueeze(x,[0, 4]) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_Sqrt_cpu(self): X = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) XT = np.sqrt(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.sqrt(x) dx = result.creator.backward(dy.data) G = 0.5 * np.power(X, -0.5) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sqrt_gpu(self): X = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) XT = np.sqrt(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sqrt(x) dx = result.creator.backward(dy.data) G = 0.5 * np.power(X, -0.5) DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_transpose_cpu(self): x = np.random.randn(3,2,1) y = x.transpose(1,2,0) dy = np.random.randn(*(y.shape)) grad = dy.transpose((2,0,1)) x = tensor.from_numpy(x) dy = 
tensor.from_numpy(dy) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.transpose(x,(1,2,0)) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_transpose_gpu(self): x = np.random.randn(3,2,1) y = x.transpose(1,2,0) dy = np.random.randn(*(y.shape)) grad = dy.transpose((2,0,1)) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.transpose(x,(1,2,0)) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_Sign_cpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sign(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sign(x) dx = result.creator.backward(dy.data) DX = np.multiply(DY,0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Sign_gpu(self): X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32) XT = np.sign(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.sign(x) dx = result.creator.backward(dy.data) DX = np.multiply(DY,0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Log_cpu(self): X = np.array([0.1,1.0,0.4,1.4,0.9,2.0]).reshape(3,2).astype(np.float32) XT = np.log(X) DY = np.ones((3, 2), dtype = np.float32) x = 
tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.log(x) dx = result.creator.backward(dy.data) #dx = 1/x G = 1.0 / X DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_Log_gpu(self): X = np.array([0.1,1.0,0.4,1.4,0.9,2.0]).reshape(3,2).astype(np.float32) XT = np.log(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.log(x) dx = result.creator.backward(dy.data) #dx = 1/x G = 1.0 / X DX = np.multiply(G, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_mul_cpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) x1 = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) y = x*x1 dy = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) grad0=x1*dy grad1=x*dy x = tensor.from_numpy(x) slope = tensor.from_numpy(x1) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) slope.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.mul(x,slope) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5) def test_mul_gpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) x1 = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) y = x*x1 dy = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32) grad0=x1*dy grad1=x*dy x = tensor.from_numpy(x) slope = 
tensor.from_numpy(x1) dy = tensor.from_numpy(dy) x.to_device(gpu_dev) slope.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.mul(x,slope) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5) def test_reshape_cpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) y = x.reshape(2,3) dy = np.ones((3, 2), dtype = np.float32) grad = dy.reshape(3,2) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.reshape(x,(2,3)) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_reshape_gpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) y = x.reshape(2,3) dy = np.ones((3, 2), dtype = np.float32) grad = dy.reshape(3,2) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.reshape(x,(2,3)) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) def test_max_cpu(self): X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1, 0.2]).reshape(3, 2).astype(np.float32) X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0, 2.0]).reshape(3, 2).astype(np.float32) XT=np.maximum(X0,X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.max(x0,x1) dx0,dx1 = result.creator.backward(dy.data) G = 
np.subtract(X0,X1) DX0 = np.where(G>0 , 1, G*0) DX1 = np.where(G<0 , 1, G*0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_max_gpu(self): X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1, 0.2]).reshape(3, 2).astype(np.float32) X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0, 2.0]).reshape(3, 2).astype(np.float32) XT=np.maximum(X0,X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.max(x0,x1) dx0,dx1 = result.creator.backward(dy.data) G = np.subtract(X0,X1) DX0 = np.where(G>0 , 1, G*0) DX1 = np.where(G<0 , 1, G*0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_Div_cpu(self): X0 = np.array([7, -5, 0.2, -0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([0.6, -1.3, 0.1, -0.1, 0.4, 0.3]).reshape(3, 2).astype(np.float32) XT = np.divide(X0, X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.div(x0, x1) dx0, dx1 = result.creator.backward(dy.data) G0 = 1.0 / X1 DX0 = np.multiply(G0, DY) G1 = np.divide(-X0, np.square(X1)) DX1 = np.multiply(G1, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), 
DX1, decimal=5) def test_Div_gpu(self): X0 = np.array([7, -5, 0.2, -0.1, 0.3, 4]).reshape(3, 2).astype(np.float32) X1 = np.array([0.6, -1.3, 0.1, -0.1, 0.4, 0.3]).reshape(3, 2).astype(np.float32) XT = np.divide(X0, X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.div(x0, x1) dx0, dx1 = result.creator.backward(dy.data) G0 = 1.0 / X1 DX0 = np.multiply(G0, DY) G1 = np.divide(-X0, np.square(X1)) DX1 = np.multiply(G1, DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_squeeze(self): def squeeze_helper(gpu=False): x = np.random.randn(3,1,2,1,1) y = x.reshape(3, 2) dy = np.random.randn(3, 2) grad = dy.reshape(3,1,2,1,1) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) if(gpu): x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.squeeze(x,[1,3,4]) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) squeeze_helper(False) squeeze_helper(True) def test_shape_cpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) y = list(x.shape) dy = np.ones((3, 2), dtype = np.float32) grad = list(dy.shape) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(cpu_dev) dy.to_device(cpu_dev) result=autograd.shape(x) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(dx, grad, decimal=5) def test_shape_gpu(self): x = np.array([0.1,-1.0,0.4,4.0,-0.9,9.0]).reshape(3,2).astype(np.float32) y = 
list(x.shape) dy = np.ones((3, 2), dtype = np.float32) grad = list(dy.shape) x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) x.to_device(gpu_dev) dy.to_device(gpu_dev) result=autograd.shape(x) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(dx, grad, decimal=5) def test_min_cpu(self): X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1, 0.2]).reshape(3, 2).astype(np.float32) X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0, 2.0]).reshape(3, 2).astype(np.float32) XT=np.minimum(X0,X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(cpu_dev) x1.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.min(x0,x1) dx0,dx1 = result.creator.backward(dy.data) G = np.subtract(X0,X1) DX0 = np.where(G<0 , 1, G*0) DX1 = np.where(G>0 , 1, G*0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5) def test_min_gpu(self): X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1, 0.2]).reshape(3, 2).astype(np.float32) X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0, 2.0]).reshape(3, 2).astype(np.float32) XT=np.minimum(X0,X1) DY = np.ones((3, 2), dtype = np.float32) x0 = tensor.from_numpy(X0) x1 = tensor.from_numpy(X1) dy = tensor.from_numpy(DY) x0.to_device(gpu_dev) x1.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.min(x0,x1) dx0,dx1 = result.creator.backward(dy.data) G = np.subtract(X0,X1) DX0 = np.where(G<0 , 1, G*0) DX1 = np.where(G>0 , 1, G*0) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, 
decimal=5) def test_HardSigmoid(self): def test_helper(gpu=False): x = np.random.randn(3, 2) #y = max(0, min(1, alpha * x + gamma)) a=0.2 g=0.5 y = np.clip(x * 0.2 + 0.5, 0, 1) dy=np.random.randn(3,2) grad=(0<(np.clip(x * 0.2 + 0.5, 0, 1)) * (np.clip(x * 0.2 + 0.5, 0, 1)<1))*0.2 * dy x = tensor.from_numpy(x) dy = tensor.from_numpy(dy) if(gpu): x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.hardsigmoid(x,a,g) dx = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5) test_helper(False) test_helper(True) def test_prelu(self): def test_helper(gpu): x = np.random.randn(3, 2) slope = np.random.randn(3, 2) y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope dy = np.random.randn(3, 2) x0=x.copy() x0[x0>0]=1 x0[x0<1]=0 grad0=(x0+(1-x0)*slope)*dy grad1 = (1-x0)*x*dy x = tensor.from_numpy(x) slope = tensor.from_numpy(slope) dy = tensor.from_numpy(dy) if(gpu): x.to_device(gpu_dev) slope.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.prelu(x,slope) dx0,dx1 = result.creator.backward(dy.data) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5) test_helper(False) if(singa_wrap.USE_CUDA): test_helper(True) def test_SeLU(self): def test_helper(gpu): x = np.random.randn(3, 2) a=0.2 g=0.3 y = np.clip(x, 0, np.inf) * g + (np.exp(np.clip(x, -np.inf, 0)) - 1) * a * g dy=np.random.randn(3, 2) grad = (np.exp(np.clip(x, -np.inf, 0))) * g grad[x<=0]=grad[x<=0]*a grad*=dy x = tensor.from_numpy(x) def test_and_cpu(self): x0 = np.array([0, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) y = 
np.logical_and(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd._and(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_and_gpu(self): x0 = np.array([0, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32) y = np.logical_and(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd._and(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_or_cpu(self): x0 = np.array([1.0, 1.0, 2.0, -3.0, 0, -7.0]).reshape(3, 2).astype(np.float32) x1 = np.array([-1.0, 0, 2.0, 4.0, 0, -7.0]).reshape(3, 2).astype(np.float32) y = np.logical_or(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd._or(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_or_gpu(self): x0 = np.array([1.0, 1.0, 2.0, -3.0, 0, -7.0]).reshape(3, 2).astype(np.float32) x1 = np.array([-1.0, 0, 2.0, 4.0, 0, -7.0]).reshape(3, 2).astype(np.float32) y = np.logical_or(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd._or(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_not_cpu(self): x = np.array([1.0, -1.0, 0, -0.1, 0, -7.0]).reshape(3, 2).astype(np.float32) y = np.logical_not(x) x = tensor.from_numpy(x) x.to_device(cpu_dev) result = autograd._not(x) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_not_gpu(self): x = np.array([1.0, -1.0, 0, -0.1, 0, -7.0]).reshape(3, 2).astype(np.float32) y = np.logical_not(x) x = tensor.from_numpy(x) x.to_device(gpu_dev) result = autograd._not(x) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def 
test_xor_cpu(self): x0 = np.array([0, -0.3, -0.1, 0.1, 0.5, 9.0]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.logical_xor(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(cpu_dev) x1.to_device(cpu_dev) result = autograd._xor(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_xor_gpu(self): x0 = np.array([0, -0.3, -0.1, 0.1, 0.5, 9.0]).reshape(3, 2).astype(np.float32) x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32) y = np.logical_xor(x0,x1) x0 = tensor.from_numpy(x0) x1 = tensor.from_numpy(x1) x0.to_device(gpu_dev) x1.to_device(gpu_dev) result = autograd._xor(x0,x1) np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5) def test_negative_cpu(self): X = np.array([0.1,0,0.4,1.-4,0.9,-2.0]).reshape(3,2).astype(np.float32) XT = np.negative(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.negative(x) dx = result.creator.backward(dy.data) DX = np.negative(DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_negative_gpu(self): X = np.array([0.1,0,0.4,1.-4,0.9,-2.0]).reshape(3,2).astype(np.float32) XT = np.negative(X) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.negative(x) dx = result.creator.backward(dy.data) DX = np.negative(DY) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_reciprocal_cpu(self): X = np.array([0.1,0,0.4,1.-4,0.9,-2.0]).reshape(3,2).astype(np.float32) DY = np.ones((3, 2), dtype = np.float32) x = 
tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(cpu_dev) dy.to_device(cpu_dev) result = autograd.reciprocal(x) dx = result.creator.backward(dy.data) #dy/dx = -1/x**2 with np.errstate(divide='ignore'): XT = np.reciprocal(X) DX = -1/np.square(X) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) def test_reciprocal_gpu(self): X = np.array([0.1,0,0.4,1.-4,0.9,-2.0]).reshape(3,2).astype(np.float32) DY = np.ones((3, 2), dtype = np.float32) x = tensor.from_numpy(X) dy = tensor.from_numpy(DY) x.to_device(gpu_dev) dy.to_device(gpu_dev) result = autograd.reciprocal(x) dx = result.creator.backward(dy.data) #dy/dx = -1/x**2 with np.errstate(divide='ignore'): XT = np.reciprocal(X) DX = -1/np.square(X) np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5) np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5) if __name__ == '__main__': unittest.main()
36.591225
112
0.598123
11,245
68,389
3.471054
0.028813
0.067381
0.069174
0.085058
0.933465
0.922807
0.909741
0.899826
0.894189
0.887708
0
0.062875
0.244879
68,389
1,868
113
36.610814
0.692944
0.024916
0
0.832122
0
0
0.001396
0
0
0
0
0
0.12282
1
0.075581
false
0
0.005814
0
0.085029
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
76e2340689e3f62ee6bee0f3bd19ecc30cdcf5e0
61,303
py
Python
tb_rest_client/api/api_pe/entity_view_controller_api.py
samson0v/python_tb_rest_client
08ff7898740f7cec2170e85d5c3c89e222e967f7
[ "Apache-2.0" ]
30
2020-06-19T06:42:50.000Z
2021-08-23T21:16:36.000Z
tb_rest_client/api/api_pe/entity_view_controller_api.py
samson0v/python_tb_rest_client
08ff7898740f7cec2170e85d5c3c89e222e967f7
[ "Apache-2.0" ]
25
2021-08-30T01:17:27.000Z
2022-03-16T14:10:14.000Z
tb_rest_client/api/api_pe/entity_view_controller_api.py
samson0v/python_tb_rest_client
08ff7898740f7cec2170e85d5c3c89e222e967f7
[ "Apache-2.0" ]
23
2020-07-06T13:41:54.000Z
2021-08-23T21:04:50.000Z
# coding: utf-8 """ ThingsBoard REST API ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501 OpenAPI spec version: 3.3.3PAAS-RC1 Contact: info@thingsboard.io Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from tb_rest_client.api_client import ApiClient class EntityViewControllerApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def delete_entity_view_using_delete(self, entity_view_id, **kwargs): # noqa: E501 """Delete entity view (deleteEntityView) # noqa: E501 Delete the EntityView object based on the provided entity view id. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_entity_view_using_delete(entity_view_id, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501 else: (data) = self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501 return data def delete_entity_view_using_delete_with_http_info(self, entity_view_id, **kwargs): # noqa: E501 """Delete entity view (deleteEntityView) # noqa: E501 Delete the EntityView object based on the provided entity view id. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_entity_view_using_delete_with_http_info(entity_view_id, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['entity_view_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_entity_view_using_delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_view_id' is set if ('entity_view_id' not in params or params['entity_view_id'] is None): raise ValueError("Missing the required parameter `entity_view_id` when calling `delete_entity_view_using_delete`") # noqa: E501 collection_formats = {} path_params = {} if 'entity_view_id' in params: path_params['entityViewId'] = params['entity_view_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityView/{entityViewId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def find_by_query_using_post4(self, **kwargs): # noqa: E501 """Find related entity views (findByQuery) # noqa: E501 Returns all entity views that are related to the specific entity. The entity id, relation type, entity view types, depth of the search, and other query parameters defined using complex 'EntityViewSearchQuery' object. See 'Model' tab of the Parameters for more info. 
Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.find_by_query_using_post4(async_req=True) >>> result = thread.get() :param async_req bool :param EntityViewSearchQuery body: :return: list[EntityView] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.find_by_query_using_post4_with_http_info(**kwargs) # noqa: E501 else: (data) = self.find_by_query_using_post4_with_http_info(**kwargs) # noqa: E501 return data def find_by_query_using_post4_with_http_info(self, **kwargs): # noqa: E501 """Find related entity views (findByQuery) # noqa: E501 Returns all entity views that are related to the specific entity. The entity id, relation type, entity view types, depth of the search, and other query parameters defined using complex 'EntityViewSearchQuery' object. See 'Model' tab of the Parameters for more info. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.find_by_query_using_post4_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param EntityViewSearchQuery body: :return: list[EntityView] If the method is called asynchronously, returns the request thread. 
""" all_params = ['body'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method find_by_query_using_post4" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityViews', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[EntityView]', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_customer_entity_views_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501 """Get Customer Entity Views (getCustomerEntityViews) # noqa: E501 Returns a page of Entity View objects assigned to customer. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_customer_entity_views_using_get(customer_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501 else: (data) = self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501 return data def get_customer_entity_views_using_get_with_http_info(self, customer_id, page_size, page, **kwargs): # noqa: E501 """Get Customer Entity Views (getCustomerEntityViews) # noqa: E501 Returns a page of Entity View objects assigned to customer. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. 
See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. 
""" all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_customer_entity_views_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'customer_id' is set if ('customer_id' not in params or params['customer_id'] is None): raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_entity_views_using_get`") # noqa: E501 # verify the required parameter 'page_size' is set if ('page_size' not in params or params['page_size'] is None): raise ValueError("Missing the required parameter `page_size` when calling `get_customer_entity_views_using_get`") # noqa: E501 # verify the required parameter 'page' is set if ('page' not in params or params['page'] is None): raise ValueError("Missing the required parameter `page` when calling `get_customer_entity_views_using_get`") # noqa: E501 collection_formats = {} path_params = {} if 'customer_id' in params: path_params['customerId'] = params['customer_id'] # noqa: E501 query_params = [] if 'page_size' in params: query_params.append(('pageSize', params['page_size'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'type' in params: query_params.append(('type', params['type'])) # noqa: E501 if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) # noqa: E501 if 'sort_property' in params: query_params.append(('sortProperty', params['sort_property'])) # noqa: E501 if 'sort_order' in params: query_params.append(('sortOrder', params['sort_order'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} 
body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/customer/{customerId}/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageDataEntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_entity_view_by_id_using_get(self, entity_view_id, **kwargs): # noqa: E501 """Get entity view (getEntityViewById) # noqa: E501 Fetch the EntityView object based on the provided entity view id. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_view_by_id_using_get(entity_view_id, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :return: EntityView If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_entity_view_by_id_using_get_with_http_info(entity_view_id, **kwargs) # noqa: E501 else: (data) = self.get_entity_view_by_id_using_get_with_http_info(entity_view_id, **kwargs) # noqa: E501 return data def get_entity_view_by_id_using_get_with_http_info(self, entity_view_id, **kwargs): # noqa: E501 """Get entity view (getEntityViewById) # noqa: E501 Fetch the EntityView object based on the provided entity view id. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_view_by_id_using_get_with_http_info(entity_view_id, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :return: EntityView If the method is called asynchronously, returns the request thread. 
""" all_params = ['entity_view_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_entity_view_by_id_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_view_id' is set if ('entity_view_id' not in params or params['entity_view_id'] is None): raise ValueError("Missing the required parameter `entity_view_id` when calling `get_entity_view_by_id_using_get`") # noqa: E501 collection_formats = {} path_params = {} if 'entity_view_id' in params: path_params['entityViewId'] = params['entity_view_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityView/{entityViewId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='EntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_entity_view_types_using_get(self, **kwargs): # noqa: E501 """Get Entity View Types (getEntityViewTypes) # noqa: E501 Returns a set of unique entity view types based on entity views that are either owned by the tenant or assigned to the customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_view_types_using_get(async_req=True) >>> result = thread.get() :param async_req bool :return: list[EntitySubtype] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_entity_view_types_using_get_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_entity_view_types_using_get_with_http_info(**kwargs) # noqa: E501 return data def get_entity_view_types_using_get_with_http_info(self, **kwargs): # noqa: E501 """Get Entity View Types (getEntityViewTypes) # noqa: E501 Returns a set of unique entity view types based on entity views that are either owned by the tenant or assigned to the customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_view_types_using_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: list[EntitySubtype] If the method is called asynchronously, returns the request thread. 
""" all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_entity_view_types_using_get" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityView/types', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[EntitySubtype]', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_entity_views_by_entity_group_id_using_get(self, entity_group_id, page_size, page, **kwargs): # noqa: E501 """Get entity views by Entity Group Id (getEntityViewsByEntityGroupId) # noqa: E501 Returns a page of Entity View objects that belongs to specified Entity View Id. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_views_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501 else: (data) = self.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501 return data def get_entity_views_by_entity_group_id_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs): # noqa: E501 """Get entity views by Entity Group Id (getEntityViewsByEntityGroupId) # noqa: E501 Returns a page of Entity View objects that belongs to specified Entity View Id. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required) :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. """ all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_entity_views_by_entity_group_id_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_group_id' is set if ('entity_group_id' not in params or params['entity_group_id'] is None): raise ValueError("Missing the required parameter `entity_group_id` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501 # verify the required parameter 'page_size' is set if ('page_size' not in params or params['page_size'] is None): raise ValueError("Missing the required parameter `page_size` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501 # verify the required parameter 'page' is set if ('page' not in params or params['page'] is None): raise 
ValueError("Missing the required parameter `page` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501 collection_formats = {} path_params = {} if 'entity_group_id' in params: path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501 query_params = [] if 'page_size' in params: query_params.append(('pageSize', params['page_size'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) # noqa: E501 if 'sort_property' in params: query_params.append(('sortProperty', params['sort_property'])) # noqa: E501 if 'sort_order' in params: query_params.append(('sortOrder', params['sort_order'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityGroup/{entityGroupId}/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageDataEntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_entity_views_by_ids_using_get(self, entity_view_ids, **kwargs): # noqa: E501 """Get Entity Views By Ids (getEntityViewsByIds) # noqa: E501 Requested entity views must be owned by tenant or assigned to customer which user is performing the request. Security check is performed to verify that the user has 'READ' permission for the entity (entities). 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_views_by_ids_using_get(entity_view_ids, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_ids: A list of entity view ids, separated by comma ',' (required) :return: list[EntityView] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, **kwargs) # noqa: E501 else: (data) = self.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, **kwargs) # noqa: E501 return data def get_entity_views_by_ids_using_get_with_http_info(self, entity_view_ids, **kwargs): # noqa: E501 """Get Entity Views By Ids (getEntityViewsByIds) # noqa: E501 Requested entity views must be owned by tenant or assigned to customer which user is performing the request. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_ids: A list of entity view ids, separated by comma ',' (required) :return: list[EntityView] If the method is called asynchronously, returns the request thread. 
""" all_params = ['entity_view_ids'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_entity_views_by_ids_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_view_ids' is set if ('entity_view_ids' not in params or params['entity_view_ids'] is None): raise ValueError("Missing the required parameter `entity_view_ids` when calling `get_entity_views_by_ids_using_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'entity_view_ids' in params: query_params.append(('entityViewIds', params['entity_view_ids'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityViews{?entityViewIds}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[EntityView]', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_tenant_entity_view_using_get(self, entity_view_name, **kwargs): # noqa: E501 """Get Entity View by name (getTenantEntityView) # noqa: E501 Fetch the Entity View object based on the tenant id and entity view name. Available for users with 'TENANT_ADMIN' authority. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_tenant_entity_view_using_get(entity_view_name, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_name: Entity View name (required) :return: EntityView If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_tenant_entity_view_using_get_with_http_info(entity_view_name, **kwargs) # noqa: E501 else: (data) = self.get_tenant_entity_view_using_get_with_http_info(entity_view_name, **kwargs) # noqa: E501 return data def get_tenant_entity_view_using_get_with_http_info(self, entity_view_name, **kwargs): # noqa: E501 """Get Entity View by name (getTenantEntityView) # noqa: E501 Fetch the Entity View object based on the tenant id and entity view name. Available for users with 'TENANT_ADMIN' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_tenant_entity_view_using_get_with_http_info(entity_view_name, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_view_name: Entity View name (required) :return: EntityView If the method is called asynchronously, returns the request thread. 
""" all_params = ['entity_view_name'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_tenant_entity_view_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_view_name' is set if ('entity_view_name' not in params or params['entity_view_name'] is None): raise ValueError("Missing the required parameter `entity_view_name` when calling `get_tenant_entity_view_using_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'entity_view_name' in params: query_params.append(('entityViewName', params['entity_view_name'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/tenant/entityViews{?entityViewName}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='EntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_tenant_entity_views_using_get(self, page_size, page, **kwargs): # noqa: E501 """Get Tenant Entity Views (getTenantEntityViews) # noqa: E501 Returns a page of entity views owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. 
Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_tenant_entity_views_using_get(page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501 else: (data) = self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501 return data def get_tenant_entity_views_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501 """Get Tenant Entity Views (getTenantEntityViews) # noqa: E501 Returns a page of entity views owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_tenant_entity_views_using_get_with_http_info(page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. 
For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. """ all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_tenant_entity_views_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'page_size' is set if ('page_size' not in params or params['page_size'] is None): raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_entity_views_using_get`") # noqa: E501 # verify the required parameter 'page' is set if ('page' not in params or params['page'] is None): raise ValueError("Missing the required parameter `page` when calling `get_tenant_entity_views_using_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'page_size' in params: query_params.append(('pageSize', params['page_size'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'type' in params: query_params.append(('type', params['type'])) # noqa: E501 if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) # noqa: E501 if 'sort_property' in params: query_params.append(('sortProperty', 
params['sort_property'])) # noqa: E501 if 'sort_order' in params: query_params.append(('sortOrder', params['sort_order'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/tenant/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageDataEntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_user_entity_views_using_get(self, page_size, page, **kwargs): # noqa: E501 """Get Entity Views (getUserEntityViews) # noqa: E501 Returns a page of entity views that are available for the current user. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_user_entity_views_using_get(page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_user_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501 else: (data) = self.get_user_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501 return data def get_user_entity_views_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501 """Get Entity Views (getUserEntityViews) # noqa: E501 Returns a page of entity views that are available for the current user. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_user_entity_views_using_get_with_http_info(page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param int page_size: Maximum amount of entities in a one page (required) :param int page: Sequence number of page starting from 0 (required) :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ``` :param str text_search: The case insensitive 'startsWith' filter based on the entity view name. :param str sort_property: Property of entity to sort by :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) :return: PageDataEntityView If the method is called asynchronously, returns the request thread. 
""" all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_user_entity_views_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'page_size' is set if ('page_size' not in params or params['page_size'] is None): raise ValueError("Missing the required parameter `page_size` when calling `get_user_entity_views_using_get`") # noqa: E501 # verify the required parameter 'page' is set if ('page' not in params or params['page'] is None): raise ValueError("Missing the required parameter `page` when calling `get_user_entity_views_using_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'page_size' in params: query_params.append(('pageSize', params['page_size'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'type' in params: query_params.append(('type', params['type'])) # noqa: E501 if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) # noqa: E501 if 'sort_property' in params: query_params.append(('sortProperty', params['sort_property'])) # noqa: E501 if 'sort_order' in params: query_params.append(('sortOrder', params['sort_order'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/user/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET', path_params, query_params, 
header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageDataEntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def save_entity_view_using_post(self, **kwargs): # noqa: E501 """Save or update entity view (saveEntityView) # noqa: E501 Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.save_entity_view_using_post(async_req=True) >>> result = thread.get() :param async_req bool :param EntityView body: :param str entity_group_id: entityGroupId :return: EntityView If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.save_entity_view_using_post_with_http_info(**kwargs) # noqa: E501 else: (data) = self.save_entity_view_using_post_with_http_info(**kwargs) # noqa: E501 return data def save_entity_view_using_post_with_http_info(self, **kwargs): # noqa: E501 """Save or update entity view (saveEntityView) # noqa: E501 Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. 
As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.save_entity_view_using_post_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param EntityView body: :param str entity_group_id: entityGroupId :return: EntityView If the method is called asynchronously, returns the request thread. """ all_params = ['body', 'entity_group_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method save_entity_view_using_post" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'entity_group_id' in params: query_params.append(('entityGroupId', params['entity_group_id'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/entityView{?entityGroupId}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='EntityView', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
51.819949
720
0.655906
7,696
61,303
4.992204
0.04119
0.037689
0.016033
0.020614
0.98113
0.973477
0.965851
0.956793
0.953514
0.9462
0
0.016253
0.261309
61,303
1,182
721
51.86379
0.83217
0.45143
0
0.803459
0
0.001572
0.218066
0.068798
0
0
0
0
0
1
0.036164
false
0
0.006289
0
0.095912
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
6a5ad84559bccf6fa04a383a8bc1a51161c5ae64
9,293
py
Python
auth-api/tests/unit/models/views/test_authorization.py
bsnopek-freshworks/sbc-auth
871800922461239c7a09225a3d708c79173410f9
[ "Apache-2.0" ]
null
null
null
auth-api/tests/unit/models/views/test_authorization.py
bsnopek-freshworks/sbc-auth
871800922461239c7a09225a3d708c79173410f9
[ "Apache-2.0" ]
null
null
null
auth-api/tests/unit/models/views/test_authorization.py
bsnopek-freshworks/sbc-auth
871800922461239c7a09225a3d708c79173410f9
[ "Apache-2.0" ]
null
null
null
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Authorizations view. Test suite to ensure that the Authorizations view routines are working as expected. """ import uuid from auth_api.models.views.authorization import Authorization from tests.utilities.factory_scenarios import TestUserInfo from tests.utilities.factory_utils import ( factory_affiliation_model, factory_entity_model, factory_membership_model, factory_org_model, factory_user_model) def test_find_user_authorization_by_business_number(session): # pylint:disable=unused-argument """Assert that authorization view is returning result.""" user = factory_user_model() org = factory_org_model() membership = factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_business_number(str(user.keycloak_guid), entity.business_identifier) assert authorization is not None assert authorization.org_membership == membership.membership_type_code def test_find_user_authorization_by_org_id(session): # pylint:disable=unused-argument """Assert that authorization view is returning result.""" user = factory_user_model() org = factory_org_model() membership = factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_org_id(str(user.keycloak_guid), 
org.id) assert authorization is not None assert authorization.org_membership == membership.membership_type_code def test_find_user_authorization_by_org_id_and_corp_type(session): # pylint:disable=unused-argument """Assert that authorization view returns result when fetched using Corp type instead of jwt. Service accounts passes corp type instead of jwt. """ user = factory_user_model() org = factory_org_model() membership = factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_org_id_and_corp_type(org.id, 'CP') assert authorization is not None assert authorization.org_membership == membership.membership_type_code def test_find_user_authorization_by_org_id_and_corp_type_multiple_membership(session): # pylint:disable=unused-argument """Assert that authorization view returns result when fetched using Corp type instead of jwt. When multiple membership is present , return the one with Owner access. """ user1 = factory_user_model() user2 = factory_user_model(user_info=TestUserInfo.user2) org = factory_org_model() factory_membership_model(user1.id, org.id, member_type='ADMIN') membership_owner = factory_membership_model(user2.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_org_id_and_corp_type(org.id, 'CP') assert authorization is not None assert authorization.org_membership == membership_owner.membership_type_code def test_find_user_authorization_by_business_number_and_corp_type_multiple_membership( session): # pylint:disable=unused-argument """Assert that authorization view returns result when fetched using Corp type instead of jwt. 
When multiple membership is present , return the one with Owner access """ user1 = factory_user_model() user2 = factory_user_model(user_info=TestUserInfo.user2) org = factory_org_model() factory_membership_model(user1.id, org.id, member_type='ADMIN') membership_owner = factory_membership_model(user2.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_business_number_and_corp_type(entity.business_identifier, 'CP') assert authorization is not None assert authorization.org_membership == membership_owner.membership_type_code def test_find_user_authorization_by_org_id_and_invalid_corp_type(session): # pylint:disable=unused-argument """Assert that authorization view is not returning result when invalid corp type is passed.""" user = factory_user_model() org = factory_org_model() factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_org_id_and_corp_type(org.id, 'invalid_corp_type') assert authorization is None def test_find_user_authorization_by_org_id_and_invalid_corp_type_no_affliation( session): # pylint:disable=unused-argument # noqa: E501 """Assert that authorization view is returning correct result for an unclaimed/unaffiliated organization.""" user = factory_user_model() org = factory_org_model() factory_membership_model(user.id, org.id) authorization = Authorization.find_user_authorization_by_org_id_and_corp_type(org.id, 'invalid_corp_type') assert authorization is not None def test_find_user_authorization_by_business_number_and_corp_type(session): # pylint:disable=unused-argument """Assert that authorization view returns result when fetched using Corp type instead of jwt. Service accounts passes corp type instead of jwt. 
""" user = factory_user_model() org = factory_org_model() membership = factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_business_number_and_corp_type(entity.business_identifier, 'CP') assert authorization is not None assert authorization.org_membership == membership.membership_type_code def test_find_user_authorization_by_business_number_and_invalid_corp_type(session): # pylint:disable=unused-argument """Assert that authorization view is not returning result when invalid corp type is passed.""" user = factory_user_model() org = factory_org_model() factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_business_number_and_corp_type(entity.business_identifier, 'invalid_corp_type') assert authorization is None def test_find_invalid_user_authorization_by_business_number(session): # pylint:disable=unused-argument """Test with invalid user id and assert that auth is None.""" user = factory_user_model() org = factory_org_model() factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorization = Authorization.find_user_authorization_by_business_number(str(uuid.uuid4()), entity.business_identifier) assert authorization is None # Test with invalid business identifier authorization = Authorization.find_user_authorization_by_business_number(str(uuid.uuid4()), '') assert authorization is None def test_find_all_user_authorizations(session): # pylint:disable=unused-argument """Test find all user authoirzations.""" user = factory_user_model() org = factory_org_model() membership = factory_membership_model(user.id, org.id) entity = factory_entity_model() factory_affiliation_model(entity.id, org.id) authorizations = 
Authorization.find_all_authorizations_for_user(str(user.keycloak_guid)) assert authorizations is not None assert authorizations[0].org_membership == membership.membership_type_code assert authorizations[0].business_identifier == entity.business_identifier def test_find_all_user_authorizations_for_empty(session): # pylint:disable=unused-argument """Test with invalid user id and assert that auth is None.""" user = factory_user_model() org = factory_org_model() factory_membership_model(user.id, org.id) authorizations = Authorization.find_all_authorizations_for_user(str(user.keycloak_guid)) assert authorizations is not None assert authorizations[0].business_identifier is None
46.934343
120
0.745723
1,170
9,293
5.611966
0.128205
0.029698
0.025586
0.070058
0.844959
0.830643
0.794243
0.788151
0.786476
0.786476
0
0.003438
0.186162
9,293
197
121
47.172589
0.864604
0.242441
0
0.783333
0
0
0.009984
0
0
0
0
0
0.183333
1
0.1
false
0
0.033333
0
0.133333
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
6a7af97af6e8180b0f42b729f208f58cc06e4962
3,814
py
Python
tesserakti/models.py
sainioan/extractiontool
9908b7ff1915b00a5721405a48b13d941442e1dd
[ "MIT" ]
2
2021-05-18T17:25:06.000Z
2021-05-28T04:24:16.000Z
tesserakti/models.py
sainioan/extractiontool
9908b7ff1915b00a5721405a48b13d941442e1dd
[ "MIT" ]
38
2021-01-20T09:38:37.000Z
2021-05-15T13:10:05.000Z
tesserakti/models.py
sainioan/extractiontool
9908b7ff1915b00a5721405a48b13d941442e1dd
[ "MIT" ]
3
2021-01-20T13:18:31.000Z
2021-02-25T13:34:49.000Z
# This is an auto-generated Django model module. # You'll have to do the following manually to clean this up: # * Rearrange models' order # * Make sure each model has one field with primary_key=True # * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior # * Remove `managed = True` lines if you wish to allow Django to create, modify, and delete the table # Feel free to rename the models, but don't rename db_table values or field names. from django.db import models from django.utils import timezone import pytz class Page(models.Model): page_id = models.IntegerField(db_index=True) document_id = models.IntegerField(db_index=True) vasen = models.IntegerField(db_index=True) top = models.IntegerField(db_index=True) width = models.IntegerField(db_index=True) height = models.IntegerField(db_index=True) created = models.DateTimeField(default=timezone.now) class Meta: managed = True db_table = 'tes_page' unique_together = (('page_id', 'document_id'),) class Block(models.Model): block_id = models.IntegerField(db_index=True) page_id = models.IntegerField(db_index=True) document_id = models.IntegerField(db_index=True) vasen = models.IntegerField(db_index=True) top = models.IntegerField(db_index=True) width = models.IntegerField(db_index=True) height = models.IntegerField(db_index=True) created = models.DateTimeField(default=timezone.now) class Meta: managed = True db_table = 'tes_block' unique_together = (('block_id', 'page_id', 'document_id'),) class Paragraph(models.Model): paragraph_id = models.IntegerField(db_index=True) block_id = models.IntegerField(db_index=True) page_id = models.IntegerField(db_index=True) document_id = models.IntegerField(db_index=True) vasen = models.IntegerField(db_index=True) top = models.IntegerField(db_index=True) width = models.IntegerField(db_index=True) height = models.IntegerField(db_index=True) created = models.DateTimeField(default=timezone.now) class Meta: managed = True db_table = 'tes_paragraph' 
unique_together = (('paragraph_id', 'block_id', 'page_id', 'document_id'),) class Line(models.Model): line_id = models.IntegerField(db_index=True) paragraph_id = models.IntegerField(db_index=True) block_id = models.IntegerField(db_index=True) page_id = models.IntegerField(db_index=True) document_id = models.IntegerField(db_index=True) vasen = models.IntegerField(db_index=True) top = models.IntegerField(db_index=True) width = models.IntegerField(db_index=True) height = models.IntegerField(db_index=True) created = models.DateTimeField(default=timezone.now) class Meta: managed = True db_table = 'tes_line' unique_together = (('line_id', 'paragraph_id', 'block_id', 'page_id', 'document_id'),) class Word(models.Model): word_id = models.IntegerField(db_index=True) line_id = models.IntegerField(db_index=True) paragraph_id = models.IntegerField(db_index=True) block_id = models.IntegerField(db_index=True) page_id = models.IntegerField(db_index=True) document_id = models.IntegerField(db_index=True) vasen = models.IntegerField(db_index=True) top = models.IntegerField(db_index=True) width = models.IntegerField(db_index=True) height = models.IntegerField(db_index=True) conf = models.IntegerField(db_index=True) text = models.CharField(max_length=200, db_index=True) created = models.DateTimeField(default=timezone.now) class Meta: managed = True db_table = 'tes_word' unique_together = (('word_id', 'line_id', 'paragraph_id', 'block_id', 'page_id', 'document_id'),)
38.918367
105
0.719455
508
3,814
5.202756
0.179134
0.111237
0.174801
0.387817
0.754067
0.735149
0.72342
0.712826
0.712826
0.696179
0
0.000954
0.175406
3,814
97
106
39.319588
0.839428
0.122968
0
0.72
1
0
0.067426
0
0
0
0
0
0
1
0
false
0
0.04
0
0.8
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
9
6a9134622536fb13668a176be9026fcf7777a42a
20,550
py
Python
tests/milvus_python_test/test_get_vector_ids.py
youny626/milvus
9e55802c5d515ceecc4cadab9f2fd1cb477d75d5
[ "Apache-2.0" ]
null
null
null
tests/milvus_python_test/test_get_vector_ids.py
youny626/milvus
9e55802c5d515ceecc4cadab9f2fd1cb477d75d5
[ "Apache-2.0" ]
null
null
null
tests/milvus_python_test/test_get_vector_ids.py
youny626/milvus
9e55802c5d515ceecc4cadab9f2fd1cb477d75d5
[ "Apache-2.0" ]
1
2021-07-08T07:22:59.000Z
2021-07-08T07:22:59.000Z
import time import random import pdb import threading import logging from multiprocessing import Pool, Process import pytest from milvus import IndexType, MetricType from utils import * dim = 128 index_file_size = 10 GET_TIMEOUT = 30 nprobe = 1 top_k = 1 epsilon = 0.001 tag = "1970-01-01" nb = 6000 class TestGetVectorIdsBase: def get_valid_segment_name(self, connect, table): vectors = gen_vector(nb, dim) status, ids = connect.add_vectors(table, vectors) assert status.OK() status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() logging.getLogger().info(info.partitions_stat[0].segments_stat[0].segment_name) return info.partitions_stat[0].segments_stat[0].segment_name """ ****************************************************************** The following cases are used to test `get_vector_ids` function ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_table_name_None(self, connect, table): ''' target: get vector ids where table name is None method: call get_vector_ids with the table_name: None expected: exception raised ''' table_name = None segment_name = self.get_valid_segment_name(connect, table) with pytest.raises(Exception) as e: status, vector_ids = connect.get_vector_ids(table_name, segment_name) @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_table_name_not_existed(self, connect, table): ''' target: get vector ids where table name does not exist method: call get_vector_ids with a random table_name, which is not in db expected: status not ok ''' table_name = gen_unique_str("not_existed_table") segment_name = self.get_valid_segment_name(connect, table) status, vector_ids = connect.get_vector_ids(table_name, segment_name) assert not status.OK() @pytest.fixture( scope="function", params=gen_invalid_table_names() ) def get_table_name(self, request): yield request.param @pytest.mark.timeout(GET_TIMEOUT) def 
test_get_vector_ids_table_name_invalid(self, connect, table, get_table_name): ''' target: get vector ids where table name is invalid method: call get_vector_ids with invalid table_name expected: status not ok ''' table_name = get_table_name segment_name = self.get_valid_segment_name(connect, table) status, vector_ids = connect.get_vector_ids(table_name, segment_name) assert not status.OK() @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_segment_name_None(self, connect, table): ''' target: get vector ids where segment name is None method: call get_vector_ids with the segment_name: None expected: exception raised ''' valid_segment_name = self.get_valid_segment_name(connect, table) segment = None with pytest.raises(Exception) as e: status, vector_ids = connect.get_vector_ids(table, segment) @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_segment_name_not_existed(self, connect, table): ''' target: get vector ids where segment name does not exist method: call get_vector_ids with a random segment name expected: status not ok ''' valid_segment_name = self.get_valid_segment_name(connect, table) segment = gen_unique_str("not_existed_segment") status, vector_ids = connect.get_vector_ids(table, segment) logging.getLogger().info(vector_ids) assert not status.OK() @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_without_index_A(self, connect, table): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' vectors = gen_vector(10, dim) status, ids = connect.add_vectors(table, vectors) assert status.OK() status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def 
test_get_vector_ids_without_index_B(self, connect, table): ''' target: get vector ids when there is no index but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(table, tag) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.fixture( scope="function", params=gen_simple_index_params() ) def get_simple_index_params(self, request, connect): if str(connect._cmd("mode")[1]) == "CPU": if request.param["index_type"] not in [IndexType.IVF_SQ8, IndexType.IVFLAT, IndexType.FLAT]: pytest.skip("Only support index_type: flat/ivf_flat/ivf_sq8") else: pytest.skip("Only support CPU mode") return request.param @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_A(self, connect, table, get_simple_index_params): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' index_params = get_simple_index_params status = connect.create_index(table, index_params) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(table, vectors) assert status.OK() status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] 
@pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_B(self, connect, table, get_simple_index_params): ''' target: get vector ids when there is index and with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(table, tag) assert status.OK() index_params = get_simple_index_params status = connect.create_index(table, index_params) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_after_delete_vectors(self, connect, table): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' vectors = gen_vector(2, dim) status, ids = connect.add_vectors(table, vectors) assert status.OK() delete_ids = [ids[0]] status = connect.delete_by_id(table, delete_ids) status = connect.flush([table]) assert status.OK() status, info = connect.table_info(table) assert status.OK() status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1] class TestGetVectorIdsIP: """ ****************************************************************** The following cases are used to test `get_vector_ids` function ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) def 
test_get_vector_ids_without_index_A(self, connect, ip_table): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' vectors = gen_vector(10, dim) status, ids = connect.add_vectors(ip_table, vectors) assert status.OK() status = connect.flush([ip_table]) assert status.OK() status, info = connect.table_info(ip_table) assert status.OK() status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_without_index_B(self, connect, ip_table): ''' target: get vector ids when there is no index but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(ip_table, tag) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([ip_table]) assert status.OK() status, info = connect.table_info(ip_table) assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.fixture( scope="function", params=gen_simple_index_params() ) def get_simple_index_params(self, request, connect): if str(connect._cmd("mode")[1]) == "CPU": if request.param["index_type"] not in [IndexType.IVF_SQ8, IndexType.IVFLAT, IndexType.FLAT]: pytest.skip("Only support index_type: flat/ivf_flat/ivf_sq8") else: pytest.skip("Only support CPU mode") return request.param @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_A(self, connect, ip_table, 
get_simple_index_params): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' index_params = get_simple_index_params status = connect.create_index(ip_table, index_params) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(ip_table, vectors) assert status.OK() status = connect.flush([ip_table]) assert status.OK() status, info = connect.table_info(ip_table) assert status.OK() status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_B(self, connect, ip_table, get_simple_index_params): ''' target: get vector ids when there is index and with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(ip_table, tag) assert status.OK() index_params = get_simple_index_params status = connect.create_index(ip_table, index_params) assert status.OK() vectors = gen_vector(10, dim) status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([ip_table]) assert status.OK() status, info = connect.table_info(ip_table) assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_after_delete_vectors(self, connect, ip_table): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' 
vectors = gen_vector(2, dim) status, ids = connect.add_vectors(ip_table, vectors) assert status.OK() delete_ids = [ids[0]] status = connect.delete_by_id(ip_table, delete_ids) status = connect.flush([ip_table]) assert status.OK() status, info = connect.table_info(ip_table) assert status.OK() status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1] class TestGetVectorIdsJAC: """ ****************************************************************** The following cases are used to test `get_vector_ids` function ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_without_index_A(self, connect, jac_table): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' tmp, vectors = gen_binary_vectors(10, dim) status, ids = connect.add_vectors(jac_table, vectors) assert status.OK() status = connect.flush([jac_table]) assert status.OK() status, info = connect.table_info(jac_table) assert status.OK() status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_without_index_B(self, connect, jac_table): ''' target: get vector ids when there is no index but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(jac_table, tag) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([jac_table]) assert status.OK() status, info = connect.table_info(jac_table) 
assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.fixture( scope="function", params=gen_simple_index_params() ) def get_jaccard_index_params(self, request, connect): logging.getLogger().info(request.param) if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: return request.param else: pytest.skip("Skip index Temporary") @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_A(self, connect, jac_table, get_jaccard_index_params): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' index_params = get_jaccard_index_params status = connect.create_index(jac_table, index_params) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) status, ids = connect.add_vectors(jac_table, vectors) assert status.OK() status = connect.flush([jac_table]) assert status.OK() status, info = connect.table_info(jac_table) assert status.OK() status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_with_index_B(self, connect, jac_table, get_jaccard_index_params): ''' target: get vector ids when there is index and with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' status = connect.create_partition(jac_table, tag) assert status.OK() index_params = get_jaccard_index_params status = connect.create_index(jac_table, index_params) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) status, 
ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) assert status.OK() status = connect.flush([jac_table]) assert status.OK() status, info = connect.table_info(jac_table) assert status.OK() assert info.partitions_stat[1].tag == tag status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) def test_get_vector_ids_after_delete_vectors(self, connect, jac_table): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' tmp, vectors = gen_binary_vectors(2, dim) status, ids = connect.add_vectors(jac_table, vectors) assert status.OK() delete_ids = [ids[0]] status = connect.delete_by_id(jac_table, delete_ids) status = connect.flush([jac_table]) assert status.OK() status, info = connect.table_info(jac_table) assert status.OK() status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1]
41.768293
117
0.644234
2,690
20,550
4.686245
0.05539
0.106378
0.07901
0.060289
0.944947
0.925115
0.912978
0.912899
0.909805
0.895129
0
0.010234
0.24871
20,550
492
118
41.768293
0.80627
0.188516
0
0.804805
0
0
0.018313
0.002689
0
0
0
0
0.297297
1
0.075075
false
0
0.027027
0
0.123123
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0a74725d49b5d1cf081d9a4679cf1f13feebb585
6,581
py
Python
src/genie/libs/parser/iosxr/tests/ShowIgmpGroupsDetails/cli/equal/golden_output_2_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
204
2018-06-27T00:55:27.000Z
2022-03-06T21:12:18.000Z
src/genie/libs/parser/iosxr/tests/ShowIgmpGroupsDetails/cli/equal/golden_output_2_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
468
2018-06-19T00:33:18.000Z
2022-03-31T23:23:35.000Z
src/genie/libs/parser/iosxr/tests/ShowIgmpGroupsDetails/cli/equal/golden_output_2_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
309
2019-01-16T20:21:07.000Z
2022-03-30T12:56:41.000Z
expected_output = { "vrf": { "VRF1": { "interfaces": { "Loopback300": { "group": { "224.0.0.2": { "host_mode": "exclude", "last_reporter": "10.16.2.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" }, "224.0.0.9": { "host_mode": "exclude", "last_reporter": "10.16.2.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "09:48:07" }, "224.0.0.13": { "host_mode": "exclude", "last_reporter": "10.16.2.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" }, "224.0.0.22": { "host_mode": "exclude", "last_reporter": "10.16.2.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" } } }, "GigabitEthernet0/0/0/0.390": { "group": { "224.0.0.10": { "host_mode": "exclude", "last_reporter": "0.0.0.0", "router_mode": "INCLUDE", "router_mode_expires": "None", "suppress": 0, "up_time": "01:54:16" } } }, "GigabitEthernet0/0/0/0.410": { "group": { "224.0.0.2": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" }, "224.0.0.5": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "10:37:41" }, "224.0.0.6": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "10:37:41" }, "224.0.0.13": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" }, "224.0.0.22": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" }, "224.0.1.39": { "host_mode": "include", "last_reporter": "10.12.110.1", "router_mode": "EXCLUDE", "router_mode_expires": "00:01:21", 
"suppress": 0, "up_time": "02:30:06" }, "224.0.1.40": { "host_mode": "exclude", "last_reporter": "10.12.110.2", "router_mode": "EXCLUDE", "router_mode_expires": "never", "suppress": 0, "up_time": "02:43:30" } } }, "GigabitEthernet0/0/0/0.420": { "group": { "224.0.0.9": { "host_mode": "exclude", "last_reporter": "0.0.0.0", "router_mode": "INCLUDE", "router_mode_expires": "None", "suppress": 0, "up_time": "09:48:07" } } }, "GigabitEthernet0/0/0/1.390": { "group": { "224.0.0.10": { "host_mode": "exclude", "last_reporter": "0.0.0.0", "router_mode": "INCLUDE", "router_mode_expires": "None", "suppress": 0, "up_time": "01:54:16" } } }, "GigabitEthernet0/0/0/1.420": { "group": { "224.0.0.9": { "host_mode": "exclude", "last_reporter": "0.0.0.0", "router_mode": "INCLUDE", "router_mode_expires": "None", "suppress": 0, "up_time": "09:48:07" } } } } } } }
42.185897
62
0.268804
444
6,581
3.779279
0.117117
0.039333
0.151967
0.134088
0.938021
0.916567
0.896305
0.892729
0.892729
0.892133
0
0.133696
0.609026
6,581
155
63
42.458065
0.518461
0
0
0.677632
0
0
0.285953
0.019763
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0a882154a2908e4e95afec0f02c237ae262fd917
5,724
py
Python
dojo/finding/queries.py
axelpavageau/django-DefectDojo
00b425742b783ada0f432241c2812ac1257feb73
[ "BSD-3-Clause" ]
1,772
2018-01-22T23:32:15.000Z
2022-03-31T14:49:33.000Z
dojo/finding/queries.py
axelpavageau/django-DefectDojo
00b425742b783ada0f432241c2812ac1257feb73
[ "BSD-3-Clause" ]
3,461
2018-01-20T19:12:28.000Z
2022-03-31T17:14:39.000Z
dojo/finding/queries.py
axelpavageau/django-DefectDojo
00b425742b783ada0f432241c2812ac1257feb73
[ "BSD-3-Clause" ]
1,173
2018-01-23T07:10:23.000Z
2022-03-31T14:40:43.000Z
from crum import get_current_user from django.conf import settings from django.db.models import Exists, OuterRef, Q from dojo.models import Finding, Product_Member, Product_Type_Member, Stub_Finding, \ Product_Group, Product_Type_Group from dojo.authorization.authorization import get_roles_for_permission, role_has_permission, \ get_groups def get_authorized_findings(permission, queryset=None, user=None): if user is None: user = get_current_user() if user is None: return Finding.objects.none() if queryset is None: findings = Finding.objects.all() else: findings = queryset if user.is_superuser: return findings if settings.FEATURE_AUTHORIZATION_V2: if user.is_staff and settings.AUTHORIZATION_STAFF_OVERRIDE: return findings if hasattr(user, 'global_role') and user.global_role.role is not None and role_has_permission(user.global_role.role.id, permission): return findings for group in get_groups(user): if hasattr(group, 'global_role') and group.global_role.role is not None and role_has_permission(group.global_role.role.id, permission): return findings roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( product_type=OuterRef('test__engagement__product__prod_type_id'), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( product=OuterRef('test__engagement__product_id'), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( product_type=OuterRef('test__engagement__product__prod_type_id'), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( product=OuterRef('test__engagement__product_id'), group__users=user, role__in=roles) findings = findings.annotate( test__engagement__product__prod_type__member=Exists(authorized_product_type_roles), test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), 
test__engagement__product__authorized_group=Exists(authorized_product_groups)) findings = findings.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) | Q(test__engagement__product__prod_type__authorized_group=True) | Q(test__engagement__product__authorized_group=True)) else: if not user.is_staff: findings = findings.filter( Q(test__engagement__product__authorized_users__in=[user]) | Q(test__engagement__product__prod_type__authorized_users__in=[user])) return findings def get_authorized_stub_findings(permission): user = get_current_user() if user is None: return Stub_Finding.objects.none() if user.is_superuser: return Stub_Finding.objects.all() if settings.FEATURE_AUTHORIZATION_V2: if user.is_staff and settings.AUTHORIZATION_STAFF_OVERRIDE: return Stub_Finding.objects.all() if hasattr(user, 'global_role') and user.global_role.role is not None and role_has_permission(user.global_role.role.id, permission): return Stub_Finding.objects.all() for group in get_groups(user): if hasattr(group, 'global_role') and group.global_role.role is not None and role_has_permission(group.global_role.role.id, permission): return Stub_Finding.objects.all() roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( product_type=OuterRef('test__engagement__product__prod_type_id'), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( product=OuterRef('test__engagement__product_id'), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( product_type=OuterRef('test__engagement__product__prod_type_id'), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( product=OuterRef('test__engagement__product_id'), group__users=user, role__in=roles) findings = Stub_Finding.objects.annotate( test__engagement__product__prod_type__member=Exists(authorized_product_type_roles), 
test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), test__engagement__product__authorized_group=Exists(authorized_product_groups)) findings = findings.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) | Q(test__engagement__product__prod_type__authorized_group=True) | Q(test__engagement__product__authorized_group=True)) else: if user.is_staff: findings = Stub_Finding.objects.all() else: findings = Stub_Finding.objects.filter( Q(test__engagement__product__authorized_users__in=[user]) | Q(test__engagement__product__prod_type__authorized_users__in=[user])) return findings
45.070866
147
0.710517
672
5,724
5.49256
0.084821
0.106204
0.159306
0.094825
0.850718
0.824167
0.811162
0.802493
0.802493
0.782986
0
0.00045
0.222746
5,724
126
148
45.428571
0.829175
0
0
0.814815
0
0
0.054507
0.04682
0
0
0
0
0
1
0.018519
false
0
0.046296
0
0.175926
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0a8dd63f67d9920ffc57c5128e72020819dc6deb
421
py
Python
type.py
jeffb4real/scripts
349bc3d3d819684261281a05db7a5b9389d664f1
[ "MIT" ]
null
null
null
type.py
jeffb4real/scripts
349bc3d3d819684261281a05db7a5b9389d664f1
[ "MIT" ]
null
null
null
type.py
jeffb4real/scripts
349bc3d3d819684261281a05db7a5b9389d664f1
[ "MIT" ]
null
null
null
#!/usr/bin/env python a = 5 if type(a) is int: print 'a = {} and it is an int'.format(a) else: print 'a = {} and it is not an int'.format(a) a = '5' if type(a) is int: print 'a = {} and it is an int'.format(a) else: print 'a = {} and it is not an int'.format(a) a = int(a) + 2 if type(a) is int: print 'a = {} and it is an int'.format(a) else: print 'a = {} and it is not an int'.format(a)
17.541667
49
0.551069
87
421
2.666667
0.206897
0.155172
0.232759
0.284483
0.913793
0.913793
0.913793
0.913793
0.913793
0.913793
0
0.009836
0.275534
421
23
50
18.304348
0.75082
0.047506
0
0.8
0
0
0.381313
0
0
0
0
0
0
0
null
null
0
0
null
null
0.4
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
10
0aacbe3ad25f7090d9596083c8df5263dd88b943
71,821
py
Python
tst/schedulers/bayesopt/test_iss_model.py
hfurkanbozkurt/syne-tune
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tst/schedulers/bayesopt/test_iss_model.py
hfurkanbozkurt/syne-tune
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
[ "ECL-2.0", "Apache-2.0" ]
1
2022-02-25T15:56:36.000Z
2022-02-25T17:53:10.000Z
tst/schedulers/bayesopt/test_iss_model.py
hfurkanbozkurt/syne-tune
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. from typing import Dict import json import numpy as np import pytest from syne_tune.optimizer.schedulers.searchers.gp_searcher_factory import \ gp_multifidelity_searcher_defaults, gp_multifidelity_searcher_factory from syne_tune.optimizer.schedulers.searchers.utils.default_arguments \ import check_and_merge_defaults from syne_tune.optimizer.schedulers.searchers.gp_searcher_utils import \ decode_state_from_old_encoding from syne_tune.config_space import randint, uniform, loguniform def _common_kwargs(config_space: Dict) -> Dict: return { 'config_space': config_space, 'max_epochs': config_space['epochs'], 'metric': 'accuracy', 'resource_attr': 'epoch', 'scheduler': 'hyperband_stopping', 'scheduler_mode': 'max', 'debug_log': False, 'normalize_targets': True, } def build_gpiss_model_factory( config_space: Dict, model_params: Dict, **kwargs): kwargs = dict( _common_kwargs(config_space), model='gp_issm', issm_gamma_one=False, **kwargs) _kwargs = check_and_merge_defaults( kwargs, *gp_multifidelity_searcher_defaults(), dict_name='search_options') kwargs_int = gp_multifidelity_searcher_factory(**_kwargs) # Need to convert `model_params` kwargs_int['model_factory'].set_params(model_params) return kwargs_int # We ran launch_sample_searcher_states.py to sample the searcher states # used here, which runs MOBSTER (hyperband_stopping, bayesopt) with the # mlp_fashionmnist_benchmark _model_params = [] _state = [] 
_model_params.append('{"noise_variance": 0.008381548138906916, "kernel_inv_bw0": 0.004177002691678498, "kernel_inv_bw1": 0.000402494802013946, "kernel_inv_bw2": 0.00036005844016162423, "kernel_inv_bw3": 4.278552430496177, "kernel_inv_bw4": 0.38190450370225937, "kernel_inv_bw5": 0.0001674608736118065, "kernel_inv_bw6": 0.5371572608999335, "kernel_covariance_scale": 1.0487725555603677, "mean_mean_value": -0.37162308332346305, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.18364130320022903, "issm_beta": 1.1069304811899965}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, 
"12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 
29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}, {"candidate": {"n_units_1": 501, "n_units_2": 601, "batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "wd": 8.340962845965557e-07}, "metrics": {"cost_metric": {"1": 38.63721990585327}, "active_metric": {"1": 0.904561824729892}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08}, "metrics": {"cost_metric": {"1": 44.914865016937256, "2": 95.70968389511108, "3": 134.2296760082245, "4": 167.57774996757507, "5": 205.92636585235596, "6": 242.58663892745972, "7": 283.2623338699341, "8": 326.5033459663391, "9": 360.76255893707275, "10": 398.4191679954529, "11": 434.4982190132141}, "active_metric": {"1": 
0.15477145148356053, "2": 0.13502405773857262, "3": 0.1245990376904571, "4": 0.12840817963111473, "5": 0.12219326383319973, "6": 0.11778267842822776, "7": 0.1133720930232558, "8": 0.11166800320769843, "9": 0.10976343223736973, "10": 0.1067562149157979, "11": 0.10555332798716921}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08}, "metrics": {"cost_metric": {"1": 50.98606991767883, "2": 92.16159892082214, "3": 123.69115900993347, "4": 154.21311402320862, "5": 192.06259202957153, "6": 227.72296023368835, "7": 263.6407790184021, "8": 304.7051441669464, "9": 336.6670799255371, "10": 372.1759181022644, "11": 405.45882201194763}, "active_metric": {"1": 0.1700441412520064, "2": 0.1407504012841091, "3": 0.1359349919743178, "4": 0.131922150882825, "5": 0.1220906902086677, "6": 0.1213884430176565, "7": 0.1182784911717496, "8": 0.120284911717496, "9": 0.1221910112359551, "10": 0.1134630818619583, "11": 0.1151685393258427}}}], "failed_candidates": [], "pending_candidates": [{"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 10}, {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08, "RESOURCE_ATTR_epoch": 12}, {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 12}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 36}]}') # elapsed_time = 595.700856924057 # 
num_observations = 73 # num_configs = 11 _model_params.append('{"noise_variance": 0.008381548138906916, "kernel_inv_bw0": 0.004177002691678498, "kernel_inv_bw1": 0.000402494802013946, "kernel_inv_bw2": 0.00036005844016162423, "kernel_inv_bw3": 4.278552430496177, "kernel_inv_bw4": 0.38190450370225937, "kernel_inv_bw5": 0.0001674608736118065, "kernel_inv_bw6": 0.5371572608999335, "kernel_covariance_scale": 1.0487725555603677, "mean_mean_value": -0.37162308332346305, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.18364130320022903, "issm_beta": 1.1069304811899965}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 
0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": 
{"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}, {"candidate": {"n_units_1": 501, "n_units_2": 601, "batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "wd": 8.340962845965557e-07}, "metrics": {"cost_metric": {"1": 38.63721990585327}, "active_metric": {"1": 0.904561824729892}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08}, "metrics": {"cost_metric": {"1": 44.914865016937256, "2": 95.70968389511108, "3": 134.2296760082245, "4": 167.57774996757507, "5": 205.92636585235596, "6": 242.58663892745972, "7": 283.2623338699341, "8": 326.5033459663391, "9": 360.76255893707275, "10": 398.4191679954529, "11": 
434.4982190132141}, "active_metric": {"1": 0.15477145148356053, "2": 0.13502405773857262, "3": 0.1245990376904571, "4": 0.12840817963111473, "5": 0.12219326383319973, "6": 0.11778267842822776, "7": 0.1133720930232558, "8": 0.11166800320769843, "9": 0.10976343223736973, "10": 0.1067562149157979, "11": 0.10555332798716921}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08}, "metrics": {"cost_metric": {"1": 50.98606991767883, "2": 92.16159892082214, "3": 123.69115900993347, "4": 154.21311402320862, "5": 192.06259202957153, "6": 227.72296023368835, "7": 263.6407790184021, "8": 304.7051441669464, "9": 336.6670799255371, "10": 372.1759181022644, "11": 405.45882201194763}, "active_metric": {"1": 0.1700441412520064, "2": 0.1407504012841091, "3": 0.1359349919743178, "4": 0.131922150882825, "5": 0.1220906902086677, "6": 0.1213884430176565, "7": 0.1182784911717496, "8": 0.120284911717496, "9": 0.1221910112359551, "10": 0.1134630818619583, "11": 0.1151685393258427}}}], "failed_candidates": [], "pending_candidates": [{"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 9}, {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08, "RESOURCE_ATTR_epoch": 10}, {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 10}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 
31}]}') # elapsed_time = 520.2518529891968 # num_observations = 63 # num_configs = 11 _model_params.append('{"noise_variance": 0.008381548138906916, "kernel_inv_bw0": 0.004177002691678498, "kernel_inv_bw1": 0.000402494802013946, "kernel_inv_bw2": 0.00036005844016162423, "kernel_inv_bw3": 4.278552430496177, "kernel_inv_bw4": 0.38190450370225937, "kernel_inv_bw5": 0.0001674608736118065, "kernel_inv_bw6": 0.5371572608999335, "kernel_covariance_scale": 1.0487725555603677, "mean_mean_value": -0.37162308332346305, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.18364130320022903, "issm_beta": 1.1069304811899965}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 
0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, 
"active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}, {"candidate": {"n_units_1": 501, "n_units_2": 601, "batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "wd": 8.340962845965557e-07}, "metrics": {"cost_metric": {"1": 38.63721990585327}, "active_metric": {"1": 0.904561824729892}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08}, "metrics": {"cost_metric": {"1": 44.914865016937256, "2": 95.70968389511108, "3": 134.2296760082245, "4": 167.57774996757507, "5": 205.92636585235596, "6": 242.58663892745972, "7": 283.2623338699341, "8": 326.5033459663391, 
"9": 360.76255893707275, "10": 398.4191679954529, "11": 434.4982190132141}, "active_metric": {"1": 0.15477145148356053, "2": 0.13502405773857262, "3": 0.1245990376904571, "4": 0.12840817963111473, "5": 0.12219326383319973, "6": 0.11778267842822776, "7": 0.1133720930232558, "8": 0.11166800320769843, "9": 0.10976343223736973, "10": 0.1067562149157979, "11": 0.10555332798716921}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08}, "metrics": {"cost_metric": {"1": 50.98606991767883, "2": 92.16159892082214, "3": 123.69115900993347, "4": 154.21311402320862, "5": 192.06259202957153, "6": 227.72296023368835, "7": 263.6407790184021, "8": 304.7051441669464, "9": 336.6670799255371, "10": 372.1759181022644, "11": 405.45882201194763}, "active_metric": {"1": 0.1700441412520064, "2": 0.1407504012841091, "3": 0.1359349919743178, "4": 0.131922150882825, "5": 0.1220906902086677, "6": 0.1213884430176565, "7": 0.1182784911717496, "8": 0.120284911717496, "9": 0.1221910112359551, "10": 0.1134630818619583, "11": 0.1151685393258427}}}], "failed_candidates": [], "pending_candidates": [{"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 8}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 27}, {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 9}, {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 
0.00031134631996358774, "wd": 1e-08, "RESOURCE_ATTR_epoch": 9}]}') # elapsed_time = 469.9041178226471 # num_observations = 56 # num_configs = 11 _model_params.append('{"noise_variance": 0.008381548138906916, "kernel_inv_bw0": 0.004177002691678498, "kernel_inv_bw1": 0.000402494802013946, "kernel_inv_bw2": 0.00036005844016162423, "kernel_inv_bw3": 4.278552430496177, "kernel_inv_bw4": 0.38190450370225937, "kernel_inv_bw5": 0.0001674608736118065, "kernel_inv_bw6": 0.5371572608999335, "kernel_covariance_scale": 1.0487725555603677, "mean_mean_value": -0.37162308332346305, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.18364130320022903, "issm_beta": 1.1069304811899965}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 
0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": 
{"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}, {"candidate": {"n_units_1": 501, "n_units_2": 601, "batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "wd": 8.340962845965557e-07}, "metrics": {"cost_metric": {"1": 38.63721990585327}, "active_metric": {"1": 0.904561824729892}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08}, "metrics": {"cost_metric": {"1": 44.914865016937256, "2": 95.70968389511108, "3": 134.2296760082245, "4": 167.57774996757507, "5": 205.92636585235596, "6": 242.58663892745972, "7": 
283.2623338699341, "8": 326.5033459663391, "9": 360.76255893707275, "10": 398.4191679954529, "11": 434.4982190132141}, "active_metric": {"1": 0.15477145148356053, "2": 0.13502405773857262, "3": 0.1245990376904571, "4": 0.12840817963111473, "5": 0.12219326383319973, "6": 0.11778267842822776, "7": 0.1133720930232558, "8": 0.11166800320769843, "9": 0.10976343223736973, "10": 0.1067562149157979, "11": 0.10555332798716921}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08}, "metrics": {"cost_metric": {"1": 50.98606991767883, "2": 92.16159892082214, "3": 123.69115900993347, "4": 154.21311402320862, "5": 192.06259202957153, "6": 227.72296023368835, "7": 263.6407790184021, "8": 304.7051441669464, "9": 336.6670799255371, "10": 372.1759181022644, "11": 405.45882201194763}, "active_metric": {"1": 0.1700441412520064, "2": 0.1407504012841091, "3": 0.1359349919743178, "4": 0.131922150882825, "5": 0.1220906902086677, "6": 0.1213884430176565, "7": 0.1182784911717496, "8": 0.120284911717496, "9": 0.1221910112359551, "10": 0.1134630818619583, "11": 0.1151685393258427}}}], "failed_candidates": [], "pending_candidates": [{"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 7}, {"n_units_1": 1024, "n_units_2": 707, "batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "wd": 1e-08, "RESOURCE_ATTR_epoch": 5}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 20}, {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 
0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 6}]}') # elapsed_time = 349.2686309814453 # num_observations = 41 # num_configs = 11 _model_params.append('{"noise_variance": 0.008381548138906916, "kernel_inv_bw0": 0.004177002691678498, "kernel_inv_bw1": 0.000402494802013946, "kernel_inv_bw2": 0.00036005844016162423, "kernel_inv_bw3": 4.278552430496177, "kernel_inv_bw4": 0.38190450370225937, "kernel_inv_bw5": 0.0001674608736118065, "kernel_inv_bw6": 0.5371572608999335, "kernel_covariance_scale": 1.0487725555603677, "mean_mean_value": -0.37162308332346305, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.18364130320022903, "issm_beta": 1.1069304811899965}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 
0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 
0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}, {"candidate": {"n_units_1": 501, "n_units_2": 601, "batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "wd": 8.340962845965557e-07}, "metrics": {"cost_metric": {"1": 38.63721990585327}, "active_metric": {"1": 0.904561824729892}}}, {"candidate": {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08}, "metrics": {"cost_metric": {"1": 44.914865016937256, "2": 95.70968389511108, "3": 134.2296760082245, "4": 
167.57774996757507, "5": 205.92636585235596, "6": 242.58663892745972, "7": 283.2623338699341, "8": 326.5033459663391, "9": 360.76255893707275, "10": 398.4191679954529, "11": 434.4982190132141}, "active_metric": {"1": 0.15477145148356053, "2": 0.13502405773857262, "3": 0.1245990376904571, "4": 0.12840817963111473, "5": 0.12219326383319973, "6": 0.11778267842822776, "7": 0.1133720930232558, "8": 0.11166800320769843, "9": 0.10976343223736973, "10": 0.1067562149157979, "11": 0.10555332798716921}}}], "failed_candidates": [], "pending_candidates": [{"batch_size": 89, "dropout_1": 0.19654676887125966, "dropout_2": 0.8682666451901773, "learning_rate": 0.00031134631996358774, "n_units_1": 1024, "n_units_2": 707, "wd": 1e-08, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 5}, {"n_units_1": 1024, "n_units_2": 1002, "batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 2}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 12}]}') # elapsed_time = 203.53759908676147 # num_observations = 23 # num_configs = 10 _model_params.append('{"noise_variance": 0.012624704488939506, "kernel_inv_bw0": 0.0026714958295617746, "kernel_inv_bw1": 0.002294225496133934, "kernel_inv_bw2": 0.0005810444910329019, "kernel_inv_bw3": 4.756569311119674, "kernel_inv_bw4": 0.41912704911412996, "kernel_inv_bw5": 0.007082508117597436, "kernel_inv_bw6": 0.6008226671164758, "kernel_covariance_scale": 1.2790537663629489, "mean_mean_value": -0.29754767463440174, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.20709875141786813, "issm_beta": 
1.1564145320327957}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 
0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, 
"active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}, {"candidate": {"n_units_1": 688, "n_units_2": 597, "batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "wd": 4.489784182359429e-08}, "metrics": {"cost_metric": {"1": 30.61675500869751}, "active_metric": {"1": 0.8976211984342066}}}], "failed_candidates": [], "pending_candidates": [{"batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "n_units_1": 501, "n_units_2": 601, "wd": 8.340962845965557e-07, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 4}, {"batch_size": 116, "dropout_1": 0.0366350257842321, "dropout_2": 0.6883751950302733, "learning_rate": 0.0003133897834907133, "n_units_1": 1024, "n_units_2": 1002, "wd": 1.1611672813117278e-08, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 9}]}') # elapsed_time = 141.0116560459137 # num_observations = 17 # num_configs = 8 _model_params.append('{"noise_variance": 0.02443305886195063, "kernel_inv_bw0": 0.01410539584512635, "kernel_inv_bw1": 1.4106734173901074, "kernel_inv_bw2": 0.002912772873874073, "kernel_inv_bw3": 0.00010000000000000009, "kernel_inv_bw4": 0.001289783525647755, "kernel_inv_bw5": 6.274402643366595, "kernel_inv_bw6": 0.014263119266637505, "kernel_covariance_scale": 
1.0004606474604771, "mean_mean_value": -1.0965610760358047, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.6394260638653898, "issm_beta": 0.8896093870386187}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, 
"26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 
0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, "active_metric": {"1": 0.6366185897435898}}}, {"candidate": {"n_units_1": 672, "n_units_2": 820, "batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "wd": 0.002536625472111785}, "metrics": {"cost_metric": {"1": 32.458306074142456}, "active_metric": {"1": 0.24486714975845414}}}], "failed_candidates": [], "pending_candidates": [{"batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "n_units_1": 688, "n_units_2": 597, "wd": 4.489784182359429e-08, "RESOURCE_ATTR_epoch": 1}, {"batch_size": 34, "dropout_1": 0.7410256603874262, "dropout_2": 0.046625361151571336, "learning_rate": 0.07937041160202492, "n_units_1": 501, "n_units_2": 601, "wd": 8.340962845965557e-07, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 4}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 7}]}') # elapsed_time = 108.29521012306213 # num_observations = 14 # num_configs = 7 _model_params.append('{"noise_variance": 0.02433360380308369, "kernel_inv_bw0": 0.033230128902756034, "kernel_inv_bw1": 1.3832161502574647, "kernel_inv_bw2": 0.0010926642716173997, "kernel_inv_bw3": 0.0009913284444091315, "kernel_inv_bw4": 0.00037318250862594773, "kernel_inv_bw5": 7.150355629993121, "kernel_inv_bw6": 0.005367219098449991, "kernel_covariance_scale": 0.9234243919759128, "mean_mean_value": -1.0950448515295788, "issm_gamma": 0.0010000000000000002, "issm_alpha": -0.6000883418698378, "issm_beta": 
0.814092090699343}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 
0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}, {"candidate": {"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338}, "metrics": {"cost_metric": {"1": 29.966220140457153}, 
"active_metric": {"1": 0.6366185897435898}}}], "failed_candidates": [], "pending_candidates": [{"batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "n_units_1": 672, "n_units_2": 820, "wd": 0.002536625472111785, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 3}, {"batch_size": 123, "dropout_1": 0.7829512576762913, "dropout_2": 0.2834197685256876, "learning_rate": 0.1784738929251937, "n_units_1": 688, "n_units_2": 597, "wd": 4.489784182359429e-08, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 6}]}') # elapsed_time = 91.06228709220886 # num_observations = 11 # num_configs = 6 _model_params.append('{"noise_variance": 4.023868196955162e-08, "kernel_inv_bw0": 0.0499484523829736, "kernel_inv_bw1": 0.5041477744353572, "kernel_inv_bw2": 0.045440426051123285, "kernel_inv_bw3": 3.509634264819305, "kernel_inv_bw4": 1.8117976798318889, "kernel_inv_bw5": 16.29050792588867, "kernel_inv_bw6": 0.011845890028904541, "kernel_covariance_scale": 2.72277711886595, "mean_mean_value": -1.412204593314323, "issm_gamma": 0.0010000000000000002, "issm_alpha": -1.7778733115941194, "issm_beta": 1.226405864173305}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 
133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, 
"7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}, {"candidate": {"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949}, "metrics": {"cost_metric": {"1": 44.07283306121826}, "active_metric": {"1": 0.23085404971932644}}}], "failed_candidates": [], "pending_candidates": [{"n_units_1": 673, "n_units_2": 262, "batch_size": 78, "dropout_1": 0.9510740133913004, "dropout_2": 0.3263851441475057, "learning_rate": 0.009715536539110267, "wd": 0.0002984576239921338, "RESOURCE_ATTR_epoch": 1}, {"batch_size": 108, "dropout_1": 0.6443283647430158, "dropout_2": 0.8194904484310889, "learning_rate": 9.196365243521935e-05, "n_units_1": 672, "n_units_2": 820, "wd": 0.002536625472111785, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 3}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 
0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 4}]}') # elapsed_time = 55.957242012023926 # num_observations = 8 # num_configs = 5 _model_params.append('{"noise_variance": 0.0010000000000000002, "kernel_inv_bw0": 1.0, "kernel_inv_bw1": 1.0, "kernel_inv_bw2": 1.0, "kernel_inv_bw3": 1.0, "kernel_inv_bw4": 1.0, "kernel_inv_bw5": 1.0, "kernel_inv_bw6": 1.0, "kernel_covariance_scale": 1.0, "mean_mean_value": 0.0, "issm_gamma": 1.0, "issm_alpha": -0.5, "issm_beta": 0.0}') _state.append('{"candidate_evaluations": [{"candidate": {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607}, "metrics": {"cost_metric": {"1": 12.25258493423462, "2": 24.305160999298096, "3": 44.05741477012634, "4": 62.029183864593506, "5": 81.38737893104553, "6": 99.16185593605042, "7": 118.72888779640198, "8": 133.45333671569824, "9": 148.23734402656555, "10": 166.52369689941406, "11": 194.99460196495056, "12": 215.73117184638977, "13": 235.3977439403534, "14": 253.71279788017273, "15": 267.6743288040161, "16": 281.8612160682678, "17": 296.0602250099182, "18": 310.0040330886841, "19": 324.75612902641296, "20": 344.674284696579, "21": 360.0983910560608, "22": 375.9487638473511, "23": 395.81145191192627, "24": 411.6494069099426, "25": 426.79202795028687, "26": 448.74489879608154, "27": 464.90988278388977, "28": 480.28413486480713, "29": 494.5631868839264, "30": 510.31515073776245, "31": 527.6290948390961, "32": 542.7905468940735, "33": 558.1524910926819, "34": 572.6776859760284, "35": 588.3533399105072}, "active_metric": {"1": 0.4978924126856684, "2": 0.3896025692492975, "3": 0.3546768366118025, "4": 0.33289843436370936, "5": 0.3259735046166198, "6": 0.30971497390606184, "7": 0.29626655961461257, "8": 0.2863307908470494, "9": 0.2753914090726616, "10": 
0.26455238859895625, "11": 0.25491770373344036, "12": 0.2485949417904456, "13": 0.24678843837816133, "14": 0.23996386993175434, "15": 0.2332396627860297, "16": 0.23143315937374553, "17": 0.22390606182256123, "18": 0.22350461661983134, "19": 0.2195905258932156, "20": 0.217482938578884, "21": 0.21176234443998398, "22": 0.2106583701324769, "23": 0.20764753111200318, "24": 0.2052388598956243, "25": 0.20102368526696102, "26": 0.1963067041348856, "27": 0.1949016459253312, "28": 0.19409875551987155, "29": 0.1904857486953031, "30": 0.18747490967482938, "31": 0.18516659975913285, "32": 0.18446407065435566, "33": 0.18115214773183463, "34": 0.18004817342432755, "35": 0.17844239261340822}}}, {"candidate": {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05}, "metrics": {"cost_metric": {"1": 18.50484609603882, "2": 45.980664014816284, "3": 96.58328175544739, "4": 148.91678476333618, "5": 229.4046459197998, "6": 300.6477417945862, "7": 383.9124698638916, "8": 473.07399010658264, "9": 555.2527649402618}, "active_metric": {"1": 0.15496198479391754, "2": 0.1519607843137255, "3": 0.13645458183273307, "4": 0.14435774309723892, "5": 0.13225290116046418, "6": 0.1253501400560224, "7": 0.12735094037615047, "8": 0.13265306122448983, "9": 0.12104841936774713}}}, {"candidate": {"n_units_1": 347, "n_units_2": 566, "batch_size": 48, "dropout_1": 0.40991313560097764, "dropout_2": 0.1486640484580416, "learning_rate": 0.0001521657976426163, "wd": 2.46706548222209e-07}, "metrics": {"cost_metric": {"1": 18.432047128677368}, "active_metric": {"1": 0.16786858974358976}}}, {"candidate": {"n_units_1": 91, "n_units_2": 459, "batch_size": 105, "dropout_1": 0.48639033141890325, "dropout_2": 0.21324913218446714, "learning_rate": 0.00013769715715418189, "wd": 0.02017249366944585}, "metrics": {"cost_metric": {"1": 17.439072132110596}, "active_metric": {"1": 0.3006516290726817}}}], 
"failed_candidates": [], "pending_candidates": [{"n_units_1": 774, "n_units_2": 917, "batch_size": 29, "dropout_1": 0.7778923725289609, "dropout_2": 0.7413003050986398, "learning_rate": 6.472832341968678e-05, "wd": 0.0007744951242384949, "RESOURCE_ATTR_epoch": 1}, {"n_units_1": 514, "n_units_2": 514, "batch_size": 68, "dropout_1": 0.495, "dropout_2": 0.495, "learning_rate": 0.0010000000000000002, "wd": 9.999999999999991e-05, "RESOURCE_ATTR_epoch": 2}, {"n_units_1": 38, "n_units_2": 187, "batch_size": 53, "dropout_1": 0.36209963448394383, "dropout_2": 0.09749003575393035, "learning_rate": 1.180123718822517e-05, "wd": 0.00011948182727147607, "RESOURCE_ATTR_epoch": 3}]}') # elapsed_time = 50.29185605049133 # num_observations = 5 # num_configs = 4 @pytest.mark.parametrize( "_model_params, _state", zip(_model_params, _state)) def test_compare_gpiss_likelihood_oldnew(_model_params, _state): config_space = { 'n_units_1': randint(4, 1024), 'n_units_2': randint(4, 1024), 'batch_size': randint(8, 128), 'dropout_1': uniform(0, 0.99), 'dropout_2': uniform(0, 0.99), 'learning_rate': loguniform(1e-6, 1), 'wd': loguniform(1e-8, 1), 'epochs': 81, } gpiss_model_factory = [] # new, old model_params = json.loads(_model_params) kwargs = dict(no_fantasizing=True) gpiss_objs = build_gpiss_model_factory( config_space, model_params, **kwargs) config_space_ext = gpiss_objs['config_space_ext'] gpiss_model_factory.append(gpiss_objs['model_factory']) gpiss_model_factory.append(build_gpiss_model_factory( config_space, model_params, use_new_code=False, **kwargs)['model_factory']) state = decode_state_from_old_encoding( enc_state=json.loads(_state), hp_ranges=config_space_ext.hp_ranges_ext) # Compare likelihoods likelihood = [ factory.model( state, fit_params=False).posterior_states[0].poster_state['likelihood'] for factory in gpiss_model_factory] for name, value in likelihood[0].items(): if name != 'num_data': np.testing.assert_almost_equal(value, likelihood[1][name]) @pytest.mark.parametrize( 
"_model_params, _state", zip(_model_params, _state)) def test_compare_gpiss_likelihood_fantasizing_oldnew(_model_params, _state): config_space = { 'n_units_1': randint(4, 1024), 'n_units_2': randint(4, 1024), 'batch_size': randint(8, 128), 'dropout_1': uniform(0, 0.99), 'dropout_2': uniform(0, 0.99), 'learning_rate': loguniform(1e-6, 1), 'wd': loguniform(1e-8, 1), 'epochs': 81, } num_fantasy_samples = 10 gpiss_model_factory = [] # new, old model_params = json.loads(_model_params) kwargs = dict( num_fantasy_samples=num_fantasy_samples, no_fantasizing=False) gpiss_objs = build_gpiss_model_factory( config_space, model_params, **kwargs) config_space_ext = gpiss_objs['config_space_ext'] gpiss_model_factory.append(gpiss_objs['model_factory']) gpiss_model_factory.append(build_gpiss_model_factory( config_space, model_params, use_new_code=False, **kwargs)['model_factory']) state = decode_state_from_old_encoding( enc_state=json.loads(_state), hp_ranges=config_space_ext.hp_ranges_ext) # Compare likelihoods # We need to force them to use the same fantasy samples gpiss_model1 = gpiss_model_factory[0].model(state, fit_params=False) likelihood = [gpiss_model1.posterior_states[0].poster_state['likelihood']] gpiss_model2 = gpiss_model_factory[1].model_for_fantasy_samples( state, fantasy_samples=gpiss_model1.fantasy_samples) likelihood.append( gpiss_model2.posterior_states[0].poster_state['likelihood']) for name, value in likelihood[0].items(): if name != 'num_data': np.testing.assert_almost_equal(value, likelihood[1][name])
352.063725
7,405
0.727127
9,042
71,821
5.578744
0.068016
0.029737
0.017346
0.026644
0.926571
0.924509
0.915112
0.909522
0.906528
0.906528
0
0.521058
0.083235
71,821
203
7,406
353.79803
0.245053
0.02193
0
0.448819
0
0.15748
0.935735
0.142078
0
0
0
0
0.015748
1
0.031496
false
0
0.062992
0.007874
0.110236
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
1
1
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
11
0aba33fb0920a7ce97c01be6b53768918e904683
27,669
py
Python
src/xr_events/migrations/0004_add_localgroup_references_and_adjust_event_fields.py
JulianAkkaya95/xr-web
f86bb8f00173c73350f7283fa22dcbdbf9660bd3
[ "MIT" ]
4
2019-03-28T20:49:59.000Z
2019-08-11T19:31:35.000Z
src/xr_events/migrations/0004_add_localgroup_references_and_adjust_event_fields.py
JulianAkkaya95/xr-web
f86bb8f00173c73350f7283fa22dcbdbf9660bd3
[ "MIT" ]
4
2019-05-08T18:07:45.000Z
2021-05-08T17:29:46.000Z
src/xr_events/migrations/0004_add_localgroup_references_and_adjust_event_fields.py
JulianAkkaya95/xr-web
f86bb8f00173c73350f7283fa22dcbdbf9660bd3
[ "MIT" ]
5
2019-03-28T20:50:15.000Z
2020-01-17T21:16:57.000Z
# Generated by Django 2.1.7 on 2019-03-24 18:08 from django.db import migrations, models import django.db.models.deletion import wagtail.core.blocks import wagtail.core.fields import wagtail.embeds.blocks import wagtail.images.blocks class Migration(migrations.Migration): dependencies = [ ("xr_pages", "0024_create_model_localgroup"), ("xr_events", "0003_page_content_fields_allow_blank"), ] operations = [ migrations.AddField( model_name="eventgrouppage", name="group", field=models.OneToOneField( null=True, on_delete=django.db.models.deletion.PROTECT, to="xr_pages.LocalGroup", ), ), migrations.AddField( model_name="eventpage", name="content", field=wagtail.core.fields.StreamField( [ ( "text", wagtail.core.blocks.StructBlock( [ ( "text", wagtail.core.blocks.RichTextBlock( features=[ "h2", "h3", "h4", "bold", "italic", "ul", "ol", "hr", "link", "document-link", ] ), ) ] ), ), ( "image", wagtail.core.blocks.StructBlock( [ ("image", wagtail.images.blocks.ImageChooserBlock()), ( "alternative_title", wagtail.core.blocks.CharBlock(required=False), ), ( "caption", wagtail.core.blocks.CharBlock(required=False), ), ( "attribution", wagtail.core.blocks.CharBlock(required=False), ), ( "link", wagtail.core.blocks.StructBlock( [ ( "internal_link", wagtail.core.blocks.PageChooserBlock( required=False ), ), ( "external_link", wagtail.core.blocks.URLBlock( required=False ), ), ], required=False, ), ), ( "align", wagtail.core.blocks.ChoiceBlock( choices=[ ("full_content", "Full content"), ("left", "Left"), ("right", "Right"), ("full_page", "Full page"), ] ), ), ] ), ), ( "video", wagtail.core.blocks.StructBlock( [ ("video", wagtail.embeds.blocks.EmbedBlock()), ( "caption", wagtail.core.blocks.CharBlock(required=False), ), ( "align", wagtail.core.blocks.ChoiceBlock( choices=[ ("full_content", "Full content"), ("left", "Left"), ("right", "Right"), ("full_page", "Full page"), ] ), ), ] ), ), ( "message", wagtail.core.blocks.StructBlock( [ ( "message", wagtail.core.blocks.StructBlock( [ ( 
"text", wagtail.core.blocks.RichTextBlock( features=[ "h2", "h3", "h4", "bold", "italic", "ul", "ol", "hr", "link", "document-link", ] ), ) ] ), ), ( "font_size_factor", wagtail.core.blocks.FloatBlock(default=1), ), ( "font_color", wagtail.core.blocks.ChoiceBlock( choices=[ ("xr-green", "XR green"), ("xr-yellow", "XR yellow"), ("xr-light-blue", "XR light blue"), ("xr-dark-blue", "XR dark blue"), ("xr-white", "XR white"), ("xr-black", "XR black"), ] ), ), ( "background_color", wagtail.core.blocks.ChoiceBlock( choices=[ ("xr-green", "XR green"), ("xr-yellow", "XR yellow"), ("xr-light-blue", "XR light blue"), ("xr-dark-blue", "XR dark blue"), ("xr-white", "XR white"), ("xr-black", "XR black"), ] ), ), ( "background_image", wagtail.images.blocks.ImageChooserBlock( required=False ), ), ( "link", wagtail.core.blocks.StructBlock( [ ( "internal_link", wagtail.core.blocks.PageChooserBlock( required=False ), ), ( "external_link", wagtail.core.blocks.URLBlock( required=False ), ), ] ), ), ( "align", wagtail.core.blocks.ChoiceBlock( choices=[ ("full_content", "Full content"), ("left", "Left"), ("right", "Right"), ("full_page", "Full page"), ] ), ), ] ), ), ( "carousel", wagtail.core.blocks.StructBlock( [ ( "items", wagtail.core.blocks.StreamBlock( [ ( "image", wagtail.core.blocks.StructBlock( [ ( "image", wagtail.images.blocks.ImageChooserBlock(), ), ( "alternative_title", wagtail.core.blocks.CharBlock( required=False ), ), ( "caption", wagtail.core.blocks.CharBlock( required=False ), ), ( "attribution", wagtail.core.blocks.CharBlock( required=False ), ), ( "link", wagtail.core.blocks.StructBlock( [ ( "internal_link", wagtail.core.blocks.PageChooserBlock( required=False ), ), ( "external_link", wagtail.core.blocks.URLBlock( required=False ), ), ], required=False, ), ), ] ), ), ( "video", wagtail.core.blocks.StructBlock( [ ( "video", wagtail.embeds.blocks.EmbedBlock(), ), ( "caption", wagtail.core.blocks.CharBlock( required=False ), ), ] ), ), ( "message", 
wagtail.core.blocks.StructBlock( [ ( "message", wagtail.core.blocks.StructBlock( [ ( "text", wagtail.core.blocks.RichTextBlock( features=[ "h2", "h3", "h4", "bold", "italic", "ul", "ol", "hr", "link", "document-link", ] ), ) ] ), ), ( "font_size_factor", wagtail.core.blocks.FloatBlock( default=1 ), ), ( "font_color", wagtail.core.blocks.ChoiceBlock( choices=[ ( "xr-green", "XR green", ), ( "xr-yellow", "XR yellow", ), ( "xr-light-blue", "XR light blue", ), ( "xr-dark-blue", "XR dark blue", ), ( "xr-white", "XR white", ), ( "xr-black", "XR black", ), ] ), ), ( "background_color", wagtail.core.blocks.ChoiceBlock( choices=[ ( "xr-green", "XR green", ), ( "xr-yellow", "XR yellow", ), ( "xr-light-blue", "XR light blue", ), ( "xr-dark-blue", "XR dark blue", ), ( "xr-white", "XR white", ), ( "xr-black", "XR black", ), ] ), ), ( "background_image", wagtail.images.blocks.ImageChooserBlock( required=False ), ), ( "link", wagtail.core.blocks.StructBlock( [ ( "internal_link", wagtail.core.blocks.PageChooserBlock( required=False ), ), ( "external_link", wagtail.core.blocks.URLBlock( required=False ), ), ] ), ), ] ), ), ] ), ), ( "align", wagtail.core.blocks.ChoiceBlock( choices=[ ("full_content", "Full content"), ("left", "Left"), ("right", "Right"), ("full_page", "Full page"), ] ), ), ] ), ), ], blank=True, help_text="The content is only visible on the detail page.", ), ), migrations.AddField( model_name="eventpage", name="group", field=models.ForeignKey( editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name="events", to="xr_pages.LocalGroup", ), ), migrations.AlterField( model_name="eventpage", name="description", field=models.CharField( blank=True, default="", help_text="A description not only for the detail view, but also for lists, teasers or social media.", max_length=254, ), ), migrations.AlterField( model_name="eventpage", name="image", field=models.ForeignKey( blank=True, help_text="An image that can be used not only for the detail view, 
but also for lists, teasers or social media.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="wagtailimages.Image", ), ), migrations.AlterField( model_name="eventpage", name="location", field=models.CharField( blank=True, help_text='Some city or address, like you would enter in GMaps or OpenStreetMap, e.g. "Berlin", "Somestreet 84, 12345 Samplecity".', max_length=255, ), ), ]
56.352342
148
0.157505
772
27,669
5.569948
0.204663
0.120233
0.177907
0.091163
0.793953
0.771628
0.723721
0.723721
0.714884
0.693953
0
0.008172
0.79215
27,669
490
149
56.467347
0.739524
0.001626
0
0.700413
1
0.004132
0.067229
0.002317
0
0
0
0
0
1
0
false
0
0.012397
0
0.018595
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
e407511524e727ed91c2dc332076a28af6d183f8
16,694
py
Python
python/test/types_tests/int32_type.py
Bhaskers-Blu-Org2/omi-script-provider
f2efe4434617d02887f1b99f7467e9d9203364a1
[ "MIT" ]
4
2019-06-16T02:29:23.000Z
2021-04-20T16:09:19.000Z
python/test/types_tests/int32_type.py
microsoft/omi-script-provider
f2efe4434617d02887f1b99f7467e9d9203364a1
[ "MIT" ]
null
null
null
python/test/types_tests/int32_type.py
microsoft/omi-script-provider
f2efe4434617d02887f1b99f7467e9d9203364a1
[ "MIT" ]
4
2019-11-03T11:52:56.000Z
2020-08-05T14:54:06.000Z
from omi import * try: from utils import * except ImportError: import sys sys.path.insert(0, '..') from utils import * def uint32_test(): be = BookEnd('uint32_test') rval = True # init (empty) v0 = MI_Uint32() if v0.getType() != MI_UINT32: BookEndPrint('----- getType failed') rval = False if v0.value is not None: BookEndPrint('----- empty init failed') rval = False # init to None v1 = MI_Uint32(None) if v1.value is not None: BookEndPrint('----- NULL init failed') rval = False # init to value r2 = random.randint(0, 0xFFFFFFFF) v2 = MI_Uint32(r2) if v2.value != r2: BookEndPrint('----- value init failed') rval = False # init to MI_Uint32 (None) t3 = MI_Uint32() if COPY_CTOR: v3 = MI_Uint32(t3) if v3.value != t3.value: BookEndPrint('----- MI_Uint32 (None) init failed') rval = False else: try: v3 = MI_Uint32(t3) except ValueError: pass else: BookEndPrint('----- init using copy ctor failed') rval = False # init to MI_Uint32 t4 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) if COPY_CTOR: v4 = MI_Uint32(t4) if v4.value != t4.value: BookEndPrint('----- MI_Uint32 init failed') rval = False else: try: v4 = MI_Uint32(t4) except ValueError: pass else: BookEndPrint('----- init using copy ctor failed') rval = False # init to a different MI type (None) **error** t5 = MI_Boolean() try: v5 = MI_Uint32(t5) except ValueError: pass else: BookEndPrint('----- init to a different MI type (None) failed') rval = False # init to a different MI type **error** t6 = MI_Boolean(True) try: v6 = MI_Uint32(t6) except ValueError: pass else: BookEndPrint('----- init to a different MI type failed') rval = False # init to invalid literal value **error** try: v7 = MI_Uint32('seven') except ValueError: pass else: BookEndPrint('----- init to invalid literal failed') rval = False # init to under-range value **error** try: v8 = MI_Uint32(-1) except: pass else: BookEndPrint('----- init to under-range value failed') rval = False # init to over-range value **error** try: v9 = MI_Uint32(0x100000000) except: pass 
else: BookEndPrint('----- init to over-range value failed') rval = False # assign None to None v10 = MI_Uint32() v10.value = None if v10.value is not None: BookEndPrint('----- None assignment to None failed') rval = False # assign a value to None v11 = MI_Uint32() r11 = random.randint(0, 0xFFFFFFFF) v11.value = r11 if v11.value != r11: BookEndPrint('----- literal value assignment to None failed') rval = False # assign MI_Uint32 (None) to None v12 = MI_Uint32() t12 = MI_Uint32() if ASSIGN_OP: v12.value = t12 if v12.value != t12.value: BookEndPrint('----- MI_Uint32 (None) assignment to None failed') rval = False else: try: v12.value = t12 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign MI_Uint32 to None v13 = MI_Uint32() t13 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) if ASSIGN_OP: v13.value = t13 if v13.value != t13.value: BookEndPrint('----- MI_Uint32 assignment to None failed') rval = False else: try: v13.value = t13 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign a different MI type (None) to None **error** v14 = MI_Uint32() t14 = MI_Boolean() try: v14.value = t14 except ValueError: pass else: BookEndPrint('----- assign a different MI type (None) failed') rval = False # assign a different MI type to None **error** v15 = MI_Uint32() t15 = MI_Boolean(False) try: v15.value = t15 except ValueError: pass else: BookEndPrint('----- assign a different MI type failed') rval = False # assign invalid literal to None **error** v16 = MI_Uint32() try: v16.value = 'sixteen' except: pass else: BookEndPrint('----- MI_Boolean assign invalid literal failed') rval = False # assign under-range value to None **error** v17 = MI_Uint32() try: v17.value = -1 except: pass else: BookEndPrint('----- assign under-range value to None failed') rval = False # assign over-range value to None **error** v18 = MI_Uint32() try: v18.value = 0x100000000 except: pass else: BookEndPrint('----- assign 
over-range value to None failed') rval = False # assign None v19 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) v19.value = None if v19.value is not None: BookEndPrint('----- None assignment failed') rval = False # assign a literal value r20a = random.randint(0, 0xFFFFFFFF) r20b = random.randint(0, 0xFFFFFFFF) while r20a == r20b: r20b = random.randint(0, 0xFFFFFFFF) v20 = MI_Uint32(r20a) v20.value = r20b if v20.value != r20b: BookEndPrint('----- value assignment failed') rval = False # assign MI_Uint32 (None) v21 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) t21 = MI_Uint32() if ASSIGN_OP: v21.value = t21 if v21.value != t21.value: BookEndPrint('----- MI_Uint32 (None) assignment failed') rval = False else: try: v21.value = t21 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign MI_Uint32 r22a = random.randint(0, 0xFFFFFFFF) r22b = random.randint(0, 0xFFFFFFFF) while r22a == r22b: r22b = random.randint(0, 0xFFFFFFFF) v22 = MI_Uint32(r22a) t22 = MI_Uint32(r22b) if ASSIGN_OP: v22.value = t22 if v22.value != t22.value: BookEndPrint('----- MI_Uint32 assignment failed') rval = False else: try: v22.value = t22 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign a different MI type (None) **error** v23 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) t23 = MI_Boolean() try: v23.value = t23 except ValueError: pass else: BookEndPrint('----- assign a different MI type (None) failed') rval = False # assign a different MI type **error** v24 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) t24 = MI_Boolean(False) try: v24.value = t24 except ValueError: pass else: BookEndPrint('----- assign a different MI type failed') rval = False # assign invalid literal **error** v25 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) try: v25.value = 'twenty-five' except ValueError: pass else: BookEndPrint('----- assign invalid literal failed') rval = False # assign under-range value **error** v26 = 
MI_Uint32(random.randint(0, 0xFFFFFFFF)) try: v26.value = -1 except: pass else: BookEndPrint('----- assign under-range value failed') rval = False # assign over-range value **error** v27 = MI_Uint32(random.randint(0, 0xFFFFFFFF)) try: v27.value = 0x100000000 except: pass else: BookEndPrint('----- assign over-range value failed') rval = False if not rval: BookEndPrint('!!!!! Tests have failed! (MI_Uint32)') return rval def sint32_test(): be = BookEnd('sint32_test') rval = True # init (empty) v0 = MI_Sint32() if v0.getType() != MI_SINT32: BookEndPrint('----- getType failed') rval = False if v0.value is not None: BookEndPrint('----- empty init failed') rval = False # init to None v1 = MI_Sint32(None) if v1.value is not None: BookEndPrint('----- NULL init failed') rval = False # init to value r2 = random.randint(-0x80000000, 0x7FFFFFFF) v2 = MI_Sint32(r2) if v2.value != r2: BookEndPrint('----- value init failed') rval = False # init to MI_Sint32 (None) t3 = MI_Sint32() if COPY_CTOR: v3 = MI_Sint32(t3) if v3.value != t3.value: BookEndPrint('----- MI_Sint32 (None) init failed') rval = False else: try: v3 = MI_Sint32(t3) except ValueError: pass else: BookEndPrint('----- init using copy ctor failed') rval = False # init to MI_Sint32 t4 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) if COPY_CTOR: v4 = MI_Sint32(t4) if v4.value != t4.value: BookEndPrint('----- MI_Sint32 init failed') rval = False else: try: v4 = MI_Sint32(t4) except ValueError: pass else: BookEndPrint('----- init using copy ctor failed') rval = False # init to a different MI type (None) **error** t5 = MI_Boolean() try: v5 = MI_Sint32(t5) except ValueError: pass else: BookEndPrint('----- init to a different MI type (None) failed') rval = False # init to a different MI type **error** t6 = MI_Boolean(True) try: v6 = MI_Sint32(t6) except ValueError: pass else: BookEndPrint('----- init to a different MI type failed') rval = False # init to invalid literal value **error** try: v7 = MI_Sint32('seven') except 
ValueError: pass else: BookEndPrint('----- init to invalid literal failed') rval = False # init to under-range value **error** try: v8 = MI_Sint32(-0x80000001) except: pass else: BookEndPrint('----- init to under-range value failed') rval = False # init to over-range value **error** try: v9 = MI_Sint32(0x80000000) except: pass else: BookEndPrint('----- init to over-range value failed') rval = False # assign None to None v10 = MI_Sint32() v10.value = None if v10.value is not None: BookEndPrint('----- None assignment to None failed') rval = False # assign a value to None v11 = MI_Sint32() r11 = random.randint(-0x80000000, 0x7FFFFFFF) v11.value = r11 if v11.value != r11: BookEndPrint('----- literal value assignment to None failed') rval = False # assign MI_Sint32 (None) to None v12 = MI_Sint32() t12 = MI_Sint32() if ASSIGN_OP: v12.value = t12 if v12.value != t12.value: BookEndPrint('----- MI_Sint32 (None) assignment to None failed') rval = False else: try: v12.value = t12 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign MI_Sint32 to None v13 = MI_Sint32() t13 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) if ASSIGN_OP: v13.value = t13 if v13.value != t13.value: BookEndPrint('----- MI_Sint32 assignment to None failed') rval = False else: try: v13.value = t13 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign a different MI type (None) to None **error** v14 = MI_Sint32() t14 = MI_Boolean() try: v14.value = t14 except ValueError: pass else: BookEndPrint('----- assign a different MI type (None) failed') rval = False # assign a different MI type to None **error** v15 = MI_Sint32() t15 = MI_Boolean(False) try: v15.value = t15 except ValueError: pass else: BookEndPrint('----- assign a different MI type failed') rval = False # assign invalid literal to None **error** v16 = MI_Sint32() try: v16.value = 'sixteen' except: pass else: BookEndPrint('----- MI_Boolean assign 
invalid literal failed') rval = False # assign under-range value to None **error** v17 = MI_Sint32() try: v17.value = -0x80000001 except: pass else: BookEndPrint('----- assign under-range value to None failed') rval = False # assign over-range value to None **error** v18 = MI_Sint32() try: v18.value = 0x80000000 except: pass else: BookEndPrint('----- assign over-range value to None failed') rval = False # assign None v19 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) v19.value = None if v19.value is not None: BookEndPrint('----- None assignment failed') rval = False # assign a literal value r20a = random.randint(-0x80000000, 0x7FFFFFFF) r20b = random.randint(-0x80000000, 0x7FFFFFFF) while r20a == r20b: r20b = random.randint(-0x80000000, 0x7FFFFFFF) v20 = MI_Sint32(r20a) v20.value = r20b if v20.value != r20b: BookEndPrint('----- value assignment failed') rval = False # assign MI_Sint32 (None) v21 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) t21 = MI_Sint32() if ASSIGN_OP: v21.value = t21 if v21.value != t21.value: BookEndPrint('----- MI_Sint32 (None) assignment failed') rval = False else: try: v21.value = t21 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign MI_Sint32 r22a = random.randint(-0x80000000, 0x7FFFFFFF) r22b = random.randint(-0x80000000, 0x7FFFFFFF) while r22a == r22b: r22b = random.randint(-0x80000000, 0x7FFFFFFF) v22 = MI_Sint32(r22a) t22 = MI_Sint32(r22b) if ASSIGN_OP: v22.value = t22 if v22.value != t22.value: BookEndPrint('----- MI_Sint32 assignment failed') rval = False else: try: v22.value = t22 except ValueError: pass else: BookEndPrint('----- assignment operator failed') rval = False # assign a different MI type (None) **error** v23 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) t23 = MI_Boolean() try: v23.value = t23 except ValueError: pass else: BookEndPrint('----- assign a different MI type (None) failed') rval = False # assign a different MI type **error** v24 = 
MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) t24 = MI_Boolean(False) try: v24.value = t24 except ValueError: pass else: BookEndPrint('----- assign a different MI type failed') rval = False # assign invalid literal **error** v25 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) try: v25.value = 'twenty-five' except ValueError: pass else: BookEndPrint('----- assign invalid literal failed') rval = False # assign under-range value **error** v26 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) try: v26.value = -0x80000001 except: pass else: BookEndPrint('----- assign under-range value failed') rval = False # assign over-range value **error** v27 = MI_Sint32(random.randint(-0x80000000, 0x7FFFFFFF)) try: v27.value = 0x80000000 except: pass else: BookEndPrint('----- assign over-range value failed') rval = False if not rval: BookEndPrint('!!!!! Tests have failed! (MI_Sint32)') return rval
25.643625
76
0.553193
1,878
16,694
4.848243
0.061235
0.076881
0.115321
0.083031
0.905327
0.865129
0.818561
0.765513
0.744865
0.737397
0
0.085355
0.338205
16,694
650
77
25.683077
0.738776
0.102971
0
0.802657
0
0
0.177772
0
0
0
0.040421
0
0
1
0.003795
false
0.079696
0.009488
0
0.017078
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
7c2cae6f7aa3e0309ea74cd1b2de9d0ff83a17da
140,771
py
Python
applications/multisite/intersite_test.py
richardstrnad/acitoolkit
7e559dbb0b2d22ecf980733d6f8b7c894ecfdbde
[ "Apache-2.0" ]
351
2015-01-15T14:44:36.000Z
2022-03-27T17:06:52.000Z
applications/multisite/intersite_test.py
richardstrnad/acitoolkit
7e559dbb0b2d22ecf980733d6f8b7c894ecfdbde
[ "Apache-2.0" ]
215
2015-01-11T07:05:12.000Z
2021-12-12T15:18:10.000Z
applications/multisite/intersite_test.py
richardstrnad/acitoolkit
7e559dbb0b2d22ecf980733d6f8b7c894ecfdbde
[ "Apache-2.0" ]
324
2015-01-07T10:03:18.000Z
2022-02-23T21:48:13.000Z
""" Test suite for Intersite application """ import unittest from acitoolkit import (AppProfile, EPG, Endpoint, Interface, L2Interface, Context, BridgeDomain, Session, Tenant, IPEndpoint, OutsideL3, OutsideEPG, OutsideNetwork, Contract) from intersite import execute_tool, IntersiteTag, CommandLine, get_arg_parser import argparse import logging from StringIO import StringIO import mock import sys if sys.version_info.major == 2: import __builtin__ as builtins else: import builtins import json import time import logging try: from multisite_test_credentials import (SITE1_IPADDR, SITE1_LOGIN, SITE1_PASSWORD, SITE1_URL, SITE2_IPADDR, SITE2_LOGIN, SITE2_PASSWORD, SITE2_URL, SITE3_IPADDR, SITE3_LOGIN, SITE3_PASSWORD, SITE3_URL, SITE4_IPADDR, SITE4_LOGIN, SITE4_PASSWORD, SITE4_URL) except ImportError: print(''' Please create a file called multisite_test_credentials.py with the following: SITE1_IPADDR = '' SITE1_LOGIN = '' SITE1_PASSWORD = '' SITE1_URL = 'http://' + SITE1_IPADDR # change http to https for SSL SITE2_IPADDR = '' SITE2_LOGIN = '' SITE2_PASSWORD = '' SITE2_URL = 'http://' + SITE2_IPADDR SITE3_IPADDR = '' SITE3_LOGIN = '' SITE3_PASSWORD = '' SITE3_URL = 'http://' + SITE3_IPADDR SITE4_IPADDR = '' SITE4_LOGIN = '' SITE4_PASSWORD = '' SITE4_URL = 'http://' + SITE3_IPADDR ''') sys.exit(0) class FakeStdio(object): """ FakeStdio : Class to fake writing to stdio and store it so that it can be verified """ def __init__(self): self.output = [] def write(self, *args, **kwargs): """ Mock the write routine :param args: Args passed to stdio write :param kwargs: Kwargs passed to stdio write :return: None """ for arg in args: self.output.append(arg) def verify_output(self, output): """ Verify that the output is the same as generated previously :param output: Output to test for :return: True if the same as the stored output. 
False otherwise """ return output == self.output class TestToolOptions(unittest.TestCase): """ Test cases for testing the command line arguments """ @staticmethod def get_logging_level(): """ Return the current logger level :return: Logger level """ return logging.getLevelName(logging.getLogger().getEffectiveLevel()) def test_no_options(self): """ Test no configuration file given. Verify that it generates an error message """ args = mock.Mock() args.debug = None args.generateconfig = None args.config = None with mock.patch('sys.stdout', new=StringIO()) as fake_out: execute_tool(args) self.assertEqual(fake_out.getvalue(), '%% No configuration file given.\n') def test_generateconfig(self): """ Test generate sample configuration file. Verify that it generates the correct text message """ args = mock.Mock() args.debug = None args.generateconfig = True args.config = None expected_text = ('Sample configuration file written to sample_config.json\n' "Replicate the site JSON for each site.\n" " Valid values for use_https and local are 'True' and 'False'\n" " One site must have local set to 'True'\n" 'Replicate the export JSON for each exported contract.\n') with mock.patch('sys.stdout', new=StringIO()) as fake_out: execute_tool(args) self.assertEqual(fake_out.getvalue(), expected_text) def test_set_debug_to_verbose(self): """ Test setting the debug level to verbose """ args = mock.Mock() args.debug = 'verbose' args.config = None execute_tool(args) def test_set_debug_to_warnings(self): """ Test setting the debug level to warnings """ args = mock.Mock() args.debug = 'warnings' args.config = None execute_tool(args) def test_set_debug_to_critical(self): """ Test setting the debug level to critical """ args = mock.Mock() args.debug = 'critical' args.config = None execute_tool(args) def test_config_bad_filename(self): """ Test no configuration file given. 
Verify that it generates an error message """ args = mock.Mock() args.debug = None args.generateconfig = None args.config = 'jkdhfdskjfhdsfkjhdsfdskjhf.jdkhfkfjh' expected_text = '%% Unable to open configuration file jkdhfdskjfhdsfkjhdsfdskjhf.jdkhfkfjh\n' with mock.patch('sys.stdout', new=StringIO()) as fake_out: execute_tool(args) self.assertEqual(fake_out.getvalue(), expected_text) def test_get_arg_parser(self): self.assertIsInstance(get_arg_parser(), argparse.ArgumentParser) class TestBadConfiguration(unittest.TestCase): """ Test various invalid configuration files """ @staticmethod def create_empty_config_file(): """ Generate an empty configuration file with only a single empty Site policy :return: dictionary containing the configuration """ config = { "config": [ { "site": { "username": SITE1_LOGIN, "name": "site1", "ip_address": SITE1_IPADDR, "password": SITE1_PASSWORD, "local": "True", "use_https": "True" } } ] } return config @staticmethod def get_args(): """ Generate an empty command line arguments :return: Instance of Mock to represent the command line arguments """ args = mock.Mock() args.debug = None args.generateconfig = None args.config = 'doesntmatter' return args @staticmethod def create_config_file(args, config, with_bad_json=False): config_filename = 'testsuite_cfg.json' args.config = config_filename config_file = open(config_filename, 'w') config_file.write(str(json.dumps(config))) if with_bad_json: config_file.write(']]]') config_file.close() def test_no_config_keyword(self): """ Test no "config" present in the JSON. Verify that the correct error message is generated. 
:return: None """ args = self.get_args() config = { "site": { "username": "", "name": "", "ip_address": "", "password": "", "local": "", "use_https": "" } } temp = sys.stdout fake_out = FakeStdio() sys.stdout = fake_out self.create_config_file(args, config) execute_tool(args, test_mode=True) sys.stdout = temp self.assertTrue(fake_out.verify_output(['%% Invalid configuration file', '\n'])) def test_bad_json_file(self): """ Test bad JSON in the file. Verify that the correct error message is generated. :return: None """ args = self.get_args() config = { "site": { "username": "", "name": "", "ip_address": "", "password": "", "local": "", "use_https": "" } } temp = sys.stdout fake_out = FakeStdio() sys.stdout = fake_out self.create_config_file(args, config, with_bad_json=True) execute_tool(args, test_mode=True) sys.stdout = temp self.assertTrue(fake_out.verify_output(['%% File could not be decoded as JSON.', '\n'])) def test_site_with_bad_ipaddress(self): """ Test invalid IP address value in the JSON. Verify that the correct exception is generated. :return: None """ args = self.get_args() config = self.create_empty_config_file() config['config'][0]['site']['ip_address'] = 'bogu$' self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_site_with_bad_ipaddress_as_number(self): """ Test invalid IP address value in the JSON. Verify that the correct exception is generated. :return: None """ args = self.get_args() config = self.create_empty_config_file() config['config'][0]['site']['ip_address'] = 100 self.create_config_file(args, config) self.assertRaises(TypeError, execute_tool, args, test_mode=True) def test_site_with_good_ipaddress_and_bad_userid(self): """ Test good IP address value but invalid username in the JSON. Verify that the correct exception is generated. 
:return: None """ args = self.get_args() config = self.create_empty_config_file() config['config'][0]['site']['username'] = '' config['config'][0]['site']['ip_address'] = SITE1_IPADDR config['config'][0]['site']['local'] = 'True' config['config'][0]['site']['use_https'] = 'True' self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_site_with_bad_local_setting(self): """ Test with bad local setting in the site JSON. Verify that the correct exception is generated. :return: None """ args = self.get_args() config = self.create_empty_config_file() config['config'][0]['site']['username'] = 'admin' config['config'][0]['site']['ip_address'] = SITE1_IPADDR config['config'][0]['site']['local'] = 'BAD' config['config'][0]['site']['use_https'] = 'True' self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_site_with_bad_use_https(self): """ Test with bad use_https setting in the site JSON. Verify that the correct exception is generated. 
:return: None """ args = self.get_args() config = self.create_empty_config_file() config['config'][0]['site']['username'] = 'admin' config['config'][0]['site']['ip_address'] = SITE1_IPADDR config['config'][0]['site']['local'] = 'True' config['config'][0]['site']['use_https'] = 'BAD' self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_reload_bad_config_filename(self): """ Test reload_config with a non-existent filename :return: None """ # Create a valid configuration args = self.get_args() config = self.create_empty_config_file() self.create_config_file(args, config) collector = execute_tool(args, test_mode=True) # Check that a bad config filename reload behaves as expected collector.config_filename = 'nonexistent.json' self.assertFalse(collector.reload_config()) def test_reload_bad_json_in_file(self): """ Test reload_config with a badly formatted JSON file :return: None """ # Create a valid configuration args = self.get_args() config = self.create_empty_config_file() self.create_config_file(args, config) collector = execute_tool(args, test_mode=True) # Create a badly formatted config file self.create_config_file(args, config, with_bad_json=True) self.assertFalse(collector.reload_config()) def test_reload_with_no_config_keyword(self): """ Test reload_config with no 'config' keyword in the JSON :return: None """ # Create a valid configuration args = self.get_args() config = self.create_empty_config_file() self.create_config_file(args, config) collector = execute_tool(args, test_mode=True) # Create a configuration file with no 'config' keyword config = { "site": { "username": "", "name": "", "ip_address": "", "password": "", "local": "", "use_https": "" } } self.create_config_file(args, config) self.assertFalse(collector.reload_config()) def test_reload_no_local_site_in_reloaded_config(self): """ Test reload_config with no local site specified in the JSON :return: None """ # Create a valid configuration args 
= self.get_args() config = self.create_empty_config_file() self.create_config_file(args, config) collector = execute_tool(args, test_mode=True) # Create a configuration with no local site config = self.create_empty_config_file() config['config'][0]['site']['local'] = 'False' self.create_config_file(args, config) # Reload self.assertFalse(collector.reload_config()) def test_oversized_intersite_tag(self): """ Test oversized string lengths for the entities that make up a Intersite tag """ # Create a configuration with long names args = self.get_args() config = self.create_empty_config_file() export_policy = { "export": { "tenant": "a" * 64, "app": "b" * 64, "epg": "c" * 64, "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ { "site": { "name": "d" * 64, } } ] } } config['config'].append(export_policy) self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_duplicate_export_policy(self): """ Test oversized string lengths for the entities that make up a Intersite tag """ # Create a configuration with long names args = self.get_args() config = self.create_empty_config_file() export_policy = { "export": { "tenant": "mytenant", "app": "myapp", "epg": "myepg", "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ { "site": { "name": "mysite", } } ] } } config['config'].append(export_policy) config['config'].append(export_policy) self.create_config_file(args, config) self.assertRaises(ValueError, execute_tool, args, test_mode=True) def test_bad_intersite_tag(self): """ Test bad intersite tag creation """ with self.assertRaises(AssertionError): IntersiteTag.fromstring('badstring') class BaseTestCase(unittest.TestCase): """ BaseTestCase: Base class to be used for creating other TestCases. Not to be instantiated directly. """ def setup_remote_site(self): """ Set up the remote site. 
Meant to be overridden by inheriting classes """ raise NotImplementedError def setup_local_site(self): """ Set up the local site. Meant to be overridden by inheriting classes """ raise NotImplementedError def setUp(self): """ Set up the test case. Setup the remote and local site. :return: None """ self.setup_remote_site() self.setup_local_site() def tearDown(self): """ Tear down the test case. Tear down the remote and local site. :return: None """ self.teardown_local_site() self.teardown_remote_site() time.sleep(2) @staticmethod def create_site_config(): """ Generate a basic configuration containing the local and remote site policies. Actual site credentials are set in global variables imported from multisite_test_credentials :return: dictionary containing the configuration """ config = { "config": [ { "site": { "username": "%s" % SITE1_LOGIN, "name": "Site1", "ip_address": "%s" % SITE1_IPADDR, "password": "%s" % SITE1_PASSWORD, "local": "True", "use_https": "False" } }, { "site": { "username": "%s" % SITE2_LOGIN, "name": "Site2", "ip_address": "%s" % SITE2_IPADDR, "password": "%s" % SITE2_PASSWORD, "local": "False", "use_https": "False" } } ] } return config @staticmethod def write_config_file(config, args): """ Write the configuration as a temporary file and set the command line arguments to read the file :param config: dictionary containing the configuration :param args: Mock of the command line arguments :return: None """ config_filename = 'testsuite_cfg.json' args.config = config_filename config_file = open(config_filename, 'w') config_file.write(str(json.dumps(config))) config_file.close() def verify_remote_site_has_entry(self, mac, ip, tenant_name, l3out_name, remote_epg_name): """ Verify that the remote site has the entry :param mac: String containing the MAC address of the endpoint to find on the remote site :param ip: String containing the IP address of the endpoint to find on the remote site :param tenant_name: String containing the remote tenant name 
holding the endpoint :param l3out_name: String containing the remote OutsideL3 name holding the endpoint :param remote_epg_name: String containing the remote OutsideEPG on the remote OutsideL3 holding the endpoint :return: True if the remote site has the endpoint. False otherwise """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json?query-target=children' % (tenant_name, l3out_name, remote_epg_name)) resp = site2.get(query) self.assertTrue(resp.ok) found = False for item in resp.json()['imdata']: if 'l3extSubnet' in item: if item['l3extSubnet']['attributes']['ip'] == ip + '/32': found = True break if not found: return False return True def verify_remote_site_has_entry_with_contract(self, mac, ip, tenant_name, l3out_name, remote_epg_name, contract_name, contract_type): """ Verify that the remote site has the entry and provides the specfied contract :param mac: String containing the MAC address of the endpoint to find on the remote site :param ip: String containing the IP address of the endpoint to find on the remote site :param tenant_name: String containing the remote tenant name holding the endpoint :param l3out_name: String containing the remote OutsideL3 name holding the endpoint :param remote_epg_name: String containing the remote OutsideEPG on the remote OutsideL3 holding the endpoint :param contract_name: String containing the contract name that the remote OutsideEPG should be providing :param contract_type: String containing the contract usage. Valid values are 'provides', 'consumes', 'consumes_interface', and 'protected_by' :return: True if the remote site has the endpoint. 
False otherwise """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) assert contract_type in ['provides', 'consumes', 'consumes_interface', 'protected_by'] query = '/api/mo/uni/tn-%s/out-%s.json?query-target=subtree' % (tenant_name, l3out_name) resp = site2.get(query) self.assertTrue(resp.ok) # Look for l3extInstP found = False for item in resp.json()['imdata']: if 'l3extInstP' in item: if item['l3extInstP']['attributes']['name'] == remote_epg_name: found = True break if not found: return False # Verify that the l3extInstP is providing the contract found = False contract_types = {'provides': ['fvRsProv', 'tnVzBrCPName'], 'consumes': ['fvRsCons', 'tnVzBrCPName'], 'consumes_interface': ['fvRsConsIf', 'tnVzCPIfName'], 'protected_by': ['fvRsProtBy', 'tnVzTabooName'] } (aci_class, aci_class_ref) = contract_types[contract_type] for item in resp.json()['imdata']: if aci_class in item: if item[aci_class]['attributes'][aci_class_ref] == contract_name: found = True break if not found: return False return self.verify_remote_site_has_entry(mac, ip, tenant_name, l3out_name, remote_epg_name) def verify_remote_site_has_policy(self, tenant_name, l3out_name, instp_name): """ Verify that the remote site has the policy :param tenant_name: String containing the remote tenant name holding the policy :param l3out_name: String containing the remote OutsideL3 name holding the policy :param instp_name: String containing the remote OutsideEPG holding the policy :return: True if the remote site has the policy. 
False otherwise """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json' % (tenant_name, l3out_name, instp_name)) resp = site2.get(query) self.assertTrue(resp.ok) found = False for item in resp.json()['imdata']: if 'l3extInstP' in item: found = True break if not found: return False return True def teardown_local_site(self): """ Teardown the local site configuration """ site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() if not resp.ok: print(str(resp.status_code) + ' ' + resp.text) self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') tenant.mark_as_deleted() resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def teardown_remote_site(self): """ Teardown the remote site configuration """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') tenant.mark_as_deleted() resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) time.sleep(2) @staticmethod def get_args(): """ Get a mock of the command line arguments :return: Mock instance representing the command line arguments """ args = mock.Mock() args.debug = None args.generateconfig = None args.config = 'doesntmatter' return args def remove_endpoint(self, mac, ip, tenant_name, app_name, epg_name): """ Remove the endpoint :param mac: String containing the MAC address of the endpoint :param ip: String containing the IP address of the endpoint :param tenant_name: String containing the tenant name of the endpoint :param app_name: String containing the AppProfile name holding the endpoint :param epg_name: String containing the EPG name holding the endpoint :return: None """ self.add_endpoint(mac, ip, tenant_name, app_name, epg_name, mark_as_deleted=True) def add_endpoint(self, mac, ip, tenant_name, app_name, epg_name, mark_as_deleted=False): """ Add the endpoint :param mac: String containing the 
MAC address of the endpoint :param ip: String containing the IP address of the endpoint :param tenant_name: String containing the tenant name of the endpoint :param app_name: String containing the AppProfile name holding the endpoint :param epg_name: String containing the EPG name holding the endpoint :param mark_as_deleted: True or False. True if the endpoint is to be marked as deleted. Default is False :return: None """ # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant(tenant_name) app = AppProfile(app_name, tenant) epg = EPG(epg_name, app) ep = Endpoint(mac, epg) ep.mac = mac ep.ip = ip if mark_as_deleted: ep.mark_as_deleted() l3ep = IPEndpoint(ip, ep) # Create the physical interface object intf = Interface('eth', '1', '101', '1', '38') vlan_intf = L2Interface('vlan-5', 'vlan', '5') vlan_intf.attach(intf) # Attach the EPG to the VLAN interface epg.attach(vlan_intf) # Assign it to the L2Interface ep.attach(vlan_intf) urls = intf.get_url() jsons = intf.get_json() # Set the the phys domain, infra, and fabric for k in range(0, len(urls)): if jsons[k] is not None: resp = site1.push_to_apic(urls[k], jsons[k]) self.assertTrue(resp.ok) # Push the endpoint resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) time.sleep(1) class BaseEndpointTestCase(BaseTestCase): """ Base class for the endpoint test cases """ def setup_local_site(self): """ Set up the local site """ # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') app = AppProfile('app', tenant) epg = EPG('epg', app) resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant 
= Tenant('intersite-testsuite') l3out = OutsideL3('l3out', tenant) resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = self.create_site_config() export_policy = { "export": { "tenant": "intersite-testsuite", "app": "app", "epg": "epg", "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ { "site": { "name": "Site2", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite" } } ] } } ] } } config['config'].append(export_policy) return config def setup_with_endpoint(self, mac='00:11:22:33:33:33'): """ Set up the configuration with an endpoint :return: 2 strings containing the MAC and IP address of the endpoint """ args = self.get_args() self.write_config_file(self.create_config_file(), args) execute_tool(args, test_mode=True) ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') return mac, ip class TestBasicEndpoints(BaseEndpointTestCase): """ Basic tests for endpoints """ def test_basic_add_endpoint(self): """ Test add endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_add_multiple_endpoint(self): """ Test add multiple endpoints """ mac1, ip1 = self.setup_with_endpoint() mac2 = '00:11:22:33:33:35' ip2 = '3.4.3.6' self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove endpoint """ 
mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_one_of_multiple_endpoint(self): """ Test remove one of multiple endpoints """ mac1, ip1 = self.setup_with_endpoint() mac2 = '00:11:22:33:33:35' ip2 = '3.4.3.6' self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg') self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) class TestBasicEndpointsWithMultipleRemoteSites(BaseEndpointTestCase): """ Basic tests for endpoints with multiple remote sites """ def setup_remote_site(self): """ Set up the remote site """ # Set up site 2 super(TestBasicEndpointsWithMultipleRemoteSites, self).setup_remote_site() # Create tenant, L3out with contract on site 3 site3 = Session(SITE3_URL, SITE3_LOGIN, SITE3_PASSWORD) resp = site3.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-site3') vrf = Context('myvrf', tenant) l3out = OutsideL3('l3out', tenant) resp = tenant.push_to_apic(site3) self.assertTrue(resp.ok) def teardown_remote_site(self): """ Teardown the remote site configuration """ time.sleep(2) super(TestBasicEndpointsWithMultipleRemoteSites, self).teardown_remote_site() site3 = Session(SITE3_URL, 
SITE3_LOGIN, SITE3_PASSWORD) resp = site3.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-site3') tenant.mark_as_deleted() resp = tenant.push_to_apic(site3) self.assertTrue(resp.ok) time.sleep(2) def create_additional_site_config(self, login, ip_address, password): """ Add the additional site to the configuration :return: Dictionary containing the configuration """ config = super(TestBasicEndpointsWithMultipleRemoteSites, self).create_config_file() site3_config = { "site": { "username": "%s" % login, "name": "Site3", "ip_address": "%s" % ip_address, "password": "%s" % password, "local": "False", "use_https": "False" } } config['config'].append(site3_config) return config def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = self.create_additional_site_config(SITE3_LOGIN, SITE3_IPADDR, SITE3_PASSWORD) site3_export_config = { "site": { "name": "Site3", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite-site3" } } ] } } for item in config['config']: if 'export' in item: item['export']['remote_sites'].append(site3_export_config) return config def setup_with_endpoint(self, mac='00:11:22:33:33:33', ip='3.4.3.4'): """ Set up the configuration with an endpoint :return: 2 strings containing the MAC and IP address of the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') return mac, ip def test_basic_add_endpoint(self): """ Test add endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) 
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) class TestBasicEndpointsWithMultipleRemoteSitesButOnlyExportToOne(TestBasicEndpointsWithMultipleRemoteSites): def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = self.create_additional_site_config(SITE3_LOGIN, SITE3_IPADDR, SITE3_PASSWORD) return config def test_basic_add_endpoint(self): """ Test add endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 
'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) class TestBasicEndpointsWithThreeRemoteSites(TestBasicEndpointsWithMultipleRemoteSites): def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = super(TestBasicEndpointsWithThreeRemoteSites, self).create_config_file() site4_config = { "site": { "username": "%s" % SITE4_LOGIN, "name": "Site4", "ip_address": "%s" % SITE4_IPADDR, "password": "%s" % SITE4_PASSWORD, "local": "False", "use_https": "False" } } config['config'].append(site4_config) site4_export_config = { "site": { "name": "Site4", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite-site4" } } ] } } for item in config['config']: if 'export' in item: item['export']['remote_sites'].append(site4_export_config) return config def test_basic_add_endpoint(self): """ Test add endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove endpoint """ mac, ip = self.setup_with_endpoint() time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 
'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) class TestBasicMacMove(BaseEndpointTestCase): """ Basic test for MAC move. i.e. the same IP address appears with a different MAC address. This case can appear in failovers such as redundant loadbalancers """ def test_basic_mac_move(self): """ Test basic MAC move """ args = self.get_args() self.write_config_file(self.create_config_file(), args) execute_tool(args, test_mode=True) ip = '3.4.3.4' mac = '00:11:22:33:33:33' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) mac = '00:11:22:33:44:44' self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') self.remove_endpoint('00:11:22:33:33:33', ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) class TestMultipleEPG(BaseTestCase): """ Test multiple EPGs """ def setup_local_site(self): """ Set up the local site """ # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') 
app1 = AppProfile('app1', tenant) epg1 = EPG('epg1', app1) app2 = AppProfile('app2', tenant) epg2 = EPG('epg2', app2) resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') l3out = OutsideL3('l3out', tenant) resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = self.create_site_config() export_policy = { "export": { "tenant": "intersite-testsuite", "app": "app1", "epg": "epg1", "remote_epg": "intersite-testsuite-app1-epg1", "remote_sites": [ { "site": { "name": "Site2", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite" } } ] } } ] } } config['config'].append(export_policy) export_policy = { "export": { "tenant": "intersite-testsuite", "app": "app2", "epg": "epg2", "remote_epg": "intersite-testsuite-app2-epg2", "remote_sites": [ { "site": { "name": "Site2", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite" } } ] } } ] } } config['config'].append(export_policy) return config def test_basic_add_endpoint(self): """ Test add endpoint """ args = self.get_args() config = self.create_config_file() config_filename = 'testsuite_cfg.json' args.config = config_filename config_file = open(config_filename, 'w') config_file.write(str(json.dumps(config))) config_file.close() execute_tool(args, test_mode=True) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 
'intersite-testsuite-app1-epg1')) def test_basic_add_multiple_endpoint(self): """ Test adding multiple endpoints """ args = self.get_args() config = self.create_config_file() config_filename = 'testsuite_cfg.json' args.config = config_filename config_file = open(config_filename, 'w') config_file.write(str(json.dumps(config))) config_file.close() execute_tool(args, test_mode=True) time.sleep(2) mac1 = '00:11:22:33:33:34' ip1 = '3.4.3.5' self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1') mac2 = '00:11:22:33:33:35' ip2 = '3.4.3.6' self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2') mac3 = '00:11:22:33:33:36' ip3 = '3.4.3.7' self.add_endpoint(mac3, ip3, 'intersite-testsuite', 'app2', 'epg2') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app2-epg2')) self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app2-epg2')) def test_basic_remove_endpoint(self): """ Test remove the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(2) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) def test_basic_remove_one_of_multiple_endpoint(self): """ Test remove one of multiple endpoints """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) 
execute_tool(args, test_mode=True) time.sleep(2) mac1 = '00:11:22:33:33:34' ip1 = '3.4.3.5' self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1') mac2 = '00:11:22:33:33:35' ip2 = '3.4.3.6' self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app2-epg2')) self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1') self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app1-epg1')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app2-epg2')) class BaseExistingEndpointsTestCase(BaseTestCase): """ Base class for tests where endpoints already exist """ def setup_local_site(self): """ Set up the local site """ # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') app = AppProfile('app', tenant) epg = EPG('epg', app) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') l3out = OutsideL3('l3out', tenant) resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def create_config_file(self): """ Create the configuration :return: Dictionary containing the 
configuration """ config = self.create_site_config() export_policy = { "export": { "tenant": "intersite-testsuite", "app": "app", "epg": "epg", "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ { "site": { "name": "Site2", "interfaces": [ { "l3out": { "name": "l3out", "tenant": "intersite-testsuite" } } ] } } ] } } config['config'].append(export_policy) return config class TestBasicExistingEndpoints(BaseExistingEndpointsTestCase): def test_basic_add_endpoint(self): """ Test add the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(2) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(2) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) class BaseExistingEndpointsWith3RemoteSites(BaseExistingEndpointsTestCase): def setup_remote_tenant(self, url, login, password, tenant_name): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site = Session(url, login, password) resp = site.login() self.assertTrue(resp.ok) tenant = Tenant(tenant_name) l3out = OutsideL3('l3out', tenant) resp = tenant.push_to_apic(site) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote sites """ self.setup_remote_tenant(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD, 'intersite-testsuite-site-2') 
self.setup_remote_tenant(SITE3_URL, SITE3_LOGIN, SITE3_PASSWORD, 'intersite-testsuite-site-3') self.setup_remote_tenant(SITE4_URL, SITE4_LOGIN, SITE4_PASSWORD, 'intersite-testsuite-site-4') def teardown_remote_tenant(self, url, login, password, tenant_name): """ Teardown the remote site configuration """ site2 = Session(url, login, password) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant(tenant_name) tenant.mark_as_deleted() resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def teardown_remote_site(self): """ Teardown the remote sites """ self.teardown_remote_tenant(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD, 'intersite-testsuite-site-2') self.teardown_remote_tenant(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD, 'intersite-testsuite-site-3') self.teardown_remote_tenant(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD, 'intersite-testsuite-site-4') time.sleep(2) def add_remote_site_to_config_file(self, config, site_name, ip_address, login, password, tenant_name): site_config = { "site": { "username": "%s" % login, "name": "%s" % site_name, "ip_address": "%s" % ip_address, "password": "%s" % password, "local": "False", "use_https": "False" } } site_export_config = { "site": { "name": site_name, "interfaces": [ { "l3out": { "name": "l3out", "tenant": tenant_name } } ] } } for item in config['config']: if 'export' in item: item['export']['remote_sites'].append(site_export_config) config['config'].append(site_config) return config def create_config_file(self): """ Create the configuration :return: Dictionary containing the configuration """ config = self.create_site_config() export_policy = { "export": { "tenant": "intersite-testsuite", "app": "app", "epg": "epg", "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ ] } } config['config'].append(export_policy) config = self.add_remote_site_to_config_file(config, 'Site2', SITE2_IPADDR, SITE2_LOGIN, SITE2_PASSWORD, 'intersite-testsuite-site2') config = self.add_remote_site_to_config_file(config, 'Site3', 
SITE3_IPADDR, SITE3_LOGIN, SITE3_PASSWORD, 'intersite-testsuite-site3') config = self.add_remote_site_to_config_file(config, 'Site4', SITE4_IPADDR, SITE4_LOGIN, SITE4_PASSWORD, 'intersite-testsuite-site4') return config class TestBasicExistingEndpointsWith3RemoteSites(BaseExistingEndpointsWith3RemoteSites): def test_basic_add_endpoint(self): """ Test add the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(2) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site2', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_remove_endpoint(self): """ Test remove the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(2) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site2', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site2', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 
'intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) class TestLargeScaleExistingEndpointsWith3RemoteSites(BaseExistingEndpointsWith3RemoteSites): def setup_local_site(self): """ Set up the local site """ for i in range(0, 3): # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') app = AppProfile('app', tenant) epg = EPG('epg', app) # Create the physical interface object intf = Interface('eth', '1', '101', '1', '38') vlan_intf = L2Interface('vlan-5', 'vlan', '5') vlan_intf.attach(intf) # Attach the EPG to the VLAN interface epg.attach(vlan_intf) for j in range(0, 254): mac = '00:11:22:33:%s:%s' % (hex(i)[2:].zfill(2), hex(j)[2:].zfill(2)) ip = '3.4.%s.%s' % (i, j) ep = Endpoint(mac, epg) ep.mac = mac ep.ip = ip l3ep = IPEndpoint(ip, ep) # Assign it to the L2Interface ep.attach(vlan_intf) urls = intf.get_url() jsons = intf.get_json() # Set the the phys domain, infra, and fabric for k in range(0, len(urls)): if jsons[k] is not None: resp = site1.push_to_apic(urls[k], jsons[k]) self.assertTrue(resp.ok) # Push the endpoint resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) time.sleep(1) def verify_remote_site_has_entries(self, tenant_name, l3out_name, remote_epg_name): """ Verify that the remote site has the entry :param mac: String containing the MAC address of the endpoint to find on the remote site :param ip: String containing the IP address of the endpoint to find on the remote site :param tenant_name: String containing the remote tenant name holding the endpoint :param l3out_name: String containing the remote OutsideL3 name holding the endpoint :param remote_epg_name: String containing the remote OutsideEPG on the remote OutsideL3 holding the endpoint :return: True if the remote site has the endpoint. 
False otherwise """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json?query-target=children' % (tenant_name, l3out_name, remote_epg_name)) resp = site2.get(query) self.assertTrue(resp.ok) subnets = set() for item in resp.json()['imdata']: if 'l3extSubnet' in item: subnets.add(item['l3extSubnet']['attributes']['ip']) for i in range(0, 3): for j in range(0, 254): ip = '3.4.%s.%s/32' % (i, j) if ip not in subnets: return False return True def test_add_large_scale_endpoints(self): """ Test add the endpoint """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) time.sleep(20) self.assertTrue(self.verify_remote_site_has_entries('intersite-testsuite-site2', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entries('intersite-testsuite-site3', 'l3out', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entries('intersite-testsuite-site4', 'l3out', 'intersite-testsuite-app-epg')) class TestBasicExistingEndpointsAddPolicyLater(BaseTestCase): """ Tests for previously existing endpoints and policy is added later """ def setup_local_site(self): """ Set up the local site """ # create Tenant, App, EPG on site 1 site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite') app = AppProfile('app', tenant) epg = EPG('epg', app) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg') resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) 
        # NOTE(review): this fragment continues a setup_remote_site() method whose
        # class header is above this chunk — indentation reconstructed.
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        l3out = OutsideL3('l3out', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def create_config_file(self):
        """ Create the configuration
        :return: Dictionary containing the configuration
        """
        # Base site config only; the export policy is added later by the tests.
        return self.create_site_config()

    @staticmethod
    def create_export_policy():
        """ Create the export policy
        :return: Dictionary containing the configuration
        """
        config = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        return config

    def test_basic_add_endpoint(self):
        """ Test adding the endpoint """
        args = self.get_args()
        config = self.create_config_file()
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        # sleeps give the collector time to process events — timing-based test
        time.sleep(2)
        # Add the export policy only after the tool is already running
        config['config'].append(self.create_export_policy())
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(2)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg'))

    def test_basic_remove_endpoint(self):
        """ Test removing the endpoint """
        args = self.get_args()
        config = self.create_config_file()
        config['config'].append(self.create_export_policy())
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        time.sleep(2)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg'))
        # Rewrite the config without the export policy; the entry should be withdrawn
        config = self.create_config_file()
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(2)
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg'))


class TestExportPolicyRemoval(BaseTestCase):
    """ Tests for export
    policy removal """

    def setup_local_site(self):
        """ Set up the local site """
        # create Tenant, App, EPG on site 1
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        # Delete any leftover tenant from a previous run on both sites
        tenant = Tenant('intersite-testsuite')
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)
        time.sleep(2)
        tenant = Tenant('intersite-testsuite')
        app = AppProfile('app', tenant)
        epg1 = EPG('epg', app)
        epg2 = EPG('epg2', app)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg'))
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)
        time.sleep(2)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        l3out = OutsideL3('l3out', tenant)
        l3out2 = OutsideL3('l3out2', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def create_diff_epg_config_file(self):
        """ Create a configuration with different EPGs
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg2",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def create_config_file(self):
        """ Create the configuration
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        # First export policy: epg -> remote EPG on l3out
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        # Second export policy: epg2 -> remote EPG on l3out2
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg2",
                "remote_epg": "intersite-testsuite-app-epg2",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out2",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def test_basic_remove_policy(self):
        """ Test removing the policy """
        args = self.get_args()
        config = self.create_config_file()
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        time.sleep(4)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out',
                                                           'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out2',
                                                           'intersite-testsuite-app-epg2'))
        # Remove all export policies; both remote EPGs and the entry should go away
        config = self.create_site_config()
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(4)
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out',
                                                            'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out2',
                                                            'intersite-testsuite-app-epg2'))

    def test_basic_change_policy_name(self):
        """ Test changing the policy name """
        args = self.get_args()
        config = self.create_config_file()
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        time.sleep(4)
        # (continues test_basic_change_policy_name from the previous lines)
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out',
                                                           'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg'))
        # Switch to a config whose remote EPG name differs; old name must be
        # withdrawn and the new one created
        config = self.create_diff_epg_config_file()
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(4)
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out',
                                                            'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out',
                                                           'intersite-testsuite-app-epg2'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg2'))


class BaseTestCaseEndpointsWithContract(BaseTestCase):
    """ Base class for Tests for endpoints with a contract """

    def setup_local_site(self):
        """ Set up the local site """
        # create Tenant, App, EPG on site 1
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        app = AppProfile('app', tenant)
        epg = EPG('epg', app)
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        l3out = OutsideL3('l3out', tenant)
        contract = Contract('contract-1', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def create_config_file(self, contract_type):
        """ Create the configuration
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        # The JSON key naming the contract differs per relationship type
        if contract_type == 'protected_by':
            contract_name = 'taboo_name'
        elif contract_type == 'consumes_interface':
            contract_name = 'cif_name'
        else:
            contract_name = 'contract_name'
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite",
                                        contract_type: [
                                            {
                                                contract_name: "contract-1"
                                            }
                                        ]
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def common_test_basic_add_endpoint(self, contract_type):
        """ Test adding endpoint """
        args = self.get_args()
        config = self.create_config_file(contract_type)
        self.write_config_file(config, args)
        execute_tool(args, test_mode=True)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertFalse(self.verify_remote_site_has_entry_with_contract(
            mac, ip, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        time.sleep(2)
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac, ip, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))

    def common_test_basic_add_multiple_endpoint(self, contract_type):
        """ Test adding multiple endpoints """
        args = self.get_args()
        config = self.create_config_file(contract_type)
        self.write_config_file(config, args)
        execute_tool(args, test_mode=True)
        time.sleep(2)
        mac1 = '00:11:22:33:33:34'
        ip1 = '3.4.3.5'
        self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac1, ip1, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac2, ip2, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))

    def common_test_basic_remove_endpoint(self, contract_type):
        """ Test removing endpoint """
        args = self.get_args()
        config = self.create_config_file(contract_type)
        self.write_config_file(config, args)
        execute_tool(args, test_mode=True)
        time.sleep(2)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac, ip, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        self.assertFalse(self.verify_remote_site_has_entry_with_contract(
            mac, ip, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))

    def common_test_basic_remove_one_of_multiple_endpoint(self, contract_type):
        """ Test removing one of multiple endpoints """
        args = self.get_args()
        config = self.create_config_file(contract_type)
        self.write_config_file(config, args)
        execute_tool(args, test_mode=True)
        time.sleep(2)
        mac1 = '00:11:22:33:33:34'
        ip1 = '3.4.3.5'
        self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac1, ip1, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac2, ip2, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        # Only the removed endpoint should disappear from the remote site
        self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')
        self.assertFalse(self.verify_remote_site_has_entry_with_contract(
            mac1, ip1, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))
        self.assertTrue(self.verify_remote_site_has_entry_with_contract(
            mac2, ip2, 'intersite-testsuite', 'l3out',
            'intersite-testsuite-app-epg', 'contract-1', contract_type))


class TestBasicEndpointsWithProvidedContract(BaseTestCaseEndpointsWithContract):
    """ Basic Tests for endpoints with a provided contract """

    def test_basic_add_endpoint(self):
        """ Test adding endpoint """
        self.common_test_basic_add_endpoint(contract_type='provides')

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endpoints """
        self.common_test_basic_add_multiple_endpoint(contract_type='provides')

    def test_basic_remove_endpoint(self):
        """ Test removing endpoint """
        self.common_test_basic_remove_endpoint(contract_type='provides')

    def test_basic_remove_one_of_multiple_endpoint(self):
        """ Test removing one of multiple endpoints """
        self.common_test_basic_remove_one_of_multiple_endpoint(contract_type='provides')


class TestBasicEndpointsWithConsumedContract(BaseTestCaseEndpointsWithContract):
    """ Basic Tests for endpoints with a consumed contract """

    def test_basic_add_endpoint(self):
        """ Test adding endpoint """
        self.common_test_basic_add_endpoint(contract_type='consumes')

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endpoints """
        self.common_test_basic_add_multiple_endpoint(contract_type='consumes')

    def test_basic_remove_endpoint(self):
        """ Test removing endpoint """
        self.common_test_basic_remove_endpoint(contract_type='consumes')

    def test_basic_remove_one_of_multiple_endpoint(self):
        """ Test removing one of multiple endpoints """
        self.common_test_basic_remove_one_of_multiple_endpoint(contract_type='consumes')


class TestBasicEndpointsWithConsumedContractInterface(BaseTestCaseEndpointsWithContract):
    """ Basic Tests for endpoints with a consumed contract interface """

    def test_basic_add_endpoint(self):
        """ Test adding endpoint """
        self.common_test_basic_add_endpoint(contract_type='consumes_interface')

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endpoints """
        self.common_test_basic_add_multiple_endpoint(contract_type='consumes_interface')

    def test_basic_remove_endpoint(self):
        """ Test removing endpoint """
        self.common_test_basic_remove_endpoint(contract_type='consumes_interface')

    def test_basic_remove_one_of_multiple_endpoint(self):
        """ Test removing one of multiple endpoints """
        self.common_test_basic_remove_one_of_multiple_endpoint(contract_type='consumes_interface')


class TestBasicEndpointsWithTaboo(BaseTestCaseEndpointsWithContract):
    """ Basic Tests for endpoints with a Taboo """

    def test_basic_add_endpoint(self):
        """ Test adding endpoint """
        self.common_test_basic_add_endpoint(contract_type='protected_by')

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endpoints """
        self.common_test_basic_add_multiple_endpoint(contract_type='protected_by')

    def test_basic_remove_endpoint(self):
        """ Test removing endpoint """
        self.common_test_basic_remove_endpoint(contract_type='protected_by')

    def test_basic_remove_one_of_multiple_endpoint(self):
        """ Test removing one of multiple endpoints """
        self.common_test_basic_remove_one_of_multiple_endpoint(contract_type='protected_by')


class TestBasicEndpointMove(BaseTestCase):
    """ Tests for an endpoint that moves """

    def setup_local_site(self):
        """ Set up the local site """
        # create Tenant, App, EPG on site 1
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        context = Context('vrf', tenant)
        bd = BridgeDomain('bd', tenant)
        app = AppProfile('app', tenant)
        epg = EPG('epg1', app)
        epg2 = EPG('epg2', app)
        # Both EPGs share the same BD/VRF so the endpoint can move between them
        bd.add_context(context)
        epg.add_bd(bd)
        epg2.add_bd(bd)
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        l3out = OutsideL3('l3out', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def create_config_file(self):
        """ Create the configuration
        :return: Dictionary
        containing the configuration """
        config = self.create_site_config()
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg1",
                "remote_epg": "intersite-testsuite-app-epg1",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg2",
                "remote_epg": "intersite-testsuite-app-epg2",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def setup_with_endpoint(self):
        """ Set up the local site with the endpoint
        :return: 2 strings containing the MAC and IP address of the endpoint
        """
        args = self.get_args()
        config = self.create_config_file()
        self.write_config_file(config, args)
        execute_tool(args, test_mode=True)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg1'))
        time.sleep(2)
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')
        return mac, ip

    def test_basic_add_endpoint(self):
        """ Test add endpoint """
        mac, ip = self.setup_with_endpoint()
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))

    def test_basic_add_multiple_endpoint(self):
        """ Test add multiple endpoints """
        mac1, ip1 = self.setup_with_endpoint()
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))
        self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg2'))

    def test_basic_remove_endpoint(self):
        """ Test removing the endpoint """
        mac, ip = self.setup_with_endpoint()
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))
        self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg1'))

    def test_basic_remove_one_of_multiple_endpoint(self):
        """ Test removing one of multiple endpoints """
        mac1, ip1 = self.setup_with_endpoint()
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))
        self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))
        self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')
        self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',
                                                           'l3out', 'intersite-testsuite-app-epg1'))
        self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',
                                                          'l3out', 'intersite-testsuite-app-epg1'))


class TestPolicyChangeProvidedContract(BaseTestCase):
    """ Tests to cover changing the provided contract within the policy """

    def setup_local_site(self):
        """ Set up the local site """
        # create Tenant, App, EPG on site 1
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        app = AppProfile('app', tenant)
        epg = EPG('epg', app)
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        # (continues setup_remote_site from the previous lines)
        l3out = OutsideL3('l3out', tenant)
        # NOTE(review): the 'contract' name is rebound; both Contract objects are
        # still attached to the tenant, so both get pushed below.
        contract = Contract('contract-1', tenant)
        contract = Contract('contract-2', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def create_config_file_before(self):
        """ Create the configuration before changing the provided contract
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite",
                                        "provides": [
                                            {
                                                "contract_name": "contract-1",
                                            },
                                            {
                                                "contract_name": "contract-2",
                                            }
                                        ]
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def create_config_file_after(self):
        """ Create the configuration after changing the provided contract
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite",
                                        "provides": [
                                            {
                                                "contract_name": "contract-1"
                                            }
                                        ]
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def verify_remote_site_has_entry_before(self, mac, ip):
        """ Verify that the remote site has the entry before changing the policy
        :param mac: String containing the endpoint MAC address
        :param ip: String containing the endpoint IP address
        :return: True or False.
        True if the remote site has the entry
        """
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')
        resp = site2.get(query)
        self.assertTrue(resp.ok)
        # Look for l3extInstP
        found = False
        for item in resp.json()['imdata']:
            if 'l3extInstP' in item:
                if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':
                    found = True
                    break
        if not found:
            return False
        # Verify that the l3extInstP is providing the contracts
        found_contract1 = False
        found_contract2 = False
        for item in resp.json()['imdata']:
            if 'fvRsProv' in item:
                if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':
                    found_contract1 = True
                if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':
                    found_contract2 = True
        # Before the change, BOTH contracts must be provided
        if not found_contract1 or not found_contract2:
            return False
        # Look for l3extSubnet
        query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'
                 '/instP-intersite-testsuite-app-epg.json?query-target=subtree')
        resp = site2.get(query)
        self.assertTrue(resp.ok)
        # Look for l3extSubnet
        found = False
        for item in resp.json()['imdata']:
            if 'l3extSubnet' in item:
                # NOTE(review): matches on the subnet 'name' attribute here, while
                # the _after variant matches on 'ip' — presumably equivalent since
                # the tool names subnets by address; confirm against the tool code.
                if item['l3extSubnet']['attributes']['name'] == ip:
                    found = True
                    break
        if not found:
            return False
        return True

    def verify_remote_site_has_entry_after(self, mac, ip):
        """ Verify that the remote site has the entry after changing the policy
        :param mac: String containing the endpoint MAC address
        :param ip: String containing the endpoint IP address
        :return: True or False.
        True if the remote site has the entry
        """
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')
        resp = site2.get(query)
        self.assertTrue(resp.ok)
        # Look for l3extInstP
        found = False
        for item in resp.json()['imdata']:
            if 'l3extInstP' in item:
                if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':
                    found = True
                    break
        if not found:
            return False
        # Verify that the l3extInstP is providing the contract
        found_contract1 = False
        found_contract2 = False
        for item in resp.json()['imdata']:
            if 'fvRsProv' in item:
                if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':
                    found_contract1 = True
                if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':
                    found_contract2 = True
        # After the change, contract-1 must still be provided and contract-2
        # must have been removed (note: 'or found_contract2' is intentional)
        if not found_contract1 or found_contract2:
            return False
        # Look for l3extSubnet
        query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'
                 '/instP-intersite-testsuite-app-epg.json?query-target=subtree')
        resp = site2.get(query)
        self.assertTrue(resp.ok)
        # Look for l3extSubnet
        found = False
        for item in resp.json()['imdata']:
            if 'l3extSubnet' in item:
                if item['l3extSubnet']['attributes']['ip'] == ip + '/32':
                    found = True
                    break
        if not found:
            return False
        return True

    def test_basic_add_endpoint(self):
        """ Test add endpoint """
        args = self.get_args()
        config = self.create_config_file_before()
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        time.sleep(2)
        self.assertFalse(self.verify_remote_site_has_entry_before(mac, ip))
        time.sleep(2)
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_before(mac, ip))
        # Reload with contract-2 removed from the provided list
        config = self.create_config_file_after()
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(4)
        self.assertTrue(self.verify_remote_site_has_entry_after(mac, ip))

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endpoints """
        args = self.get_args()
        config = self.create_config_file_before()
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        time.sleep(2)
        mac1 = '00:11:22:33:33:34'
        ip1 = '3.4.3.5'
        self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))
        self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))
        config = self.create_config_file_after()
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))
        self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))


class TestChangeL3Out(BaseTestCase):
    """ Tests for changing OutsideL3 interfaces """

    def setup_local_site(self):
        """ Set up the local site """
        # create Tenant, App, EPG on site 1
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        app = AppProfile('app', tenant)
        epg = EPG('epg', app)
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite')
        l3out1 = OutsideL3('l3out1', tenant)
        l3out2 = OutsideL3('l3out2', tenant)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    @staticmethod
    def create_export_policy(l3out_name):
        """ Create the export policy
        :param l3out_name: String containing the OutsideL3 name
        :return: Dictionary containing the export policy
        """
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": l3out_name,
                                        "tenant": "intersite-testsuite"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        return export_policy

    def create_config_file(self, l3out_name):
        """ Create the configuration
        :param l3out_name: String containing the OutsideL3 name
        :return: Dictionary containing the configuration
        """
        config = self.create_site_config()
        export_policy = self.create_export_policy(l3out_name)
        config['config'].append(export_policy)
        return config

    def test_basic_add_endpoint(self):
        """ Basic test for adding endpoint """
        args = self.get_args()
        config = self.create_config_file('l3out1')
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        time.sleep(2)
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out1', 'intersite-testsuite-app-epg'))
        time.sleep(2)
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out1', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out1',
                                                           'intersite-testsuite-app-epg'))
        # Move the export policy from l3out1 to l3out2 and reload
        config = self.create_config_file('l3out2')
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(4)
        self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out1',
                                                            'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out2',
                                                           'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out1', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out2', 'intersite-testsuite-app-epg'))

    def test_basic_add_endpoint_multiple_l3out(self):
        """ Test adding endpoint with multiple OutsideL3 interfaces """
        args = self.get_args()
        config = self.create_config_file('l3out1')
        # Extend the single-l3out policy so the site exports over both l3outs
        for policy in config['config']:
            if 'export' in policy:
                for site_policy in policy['export']['remote_sites']:
                    interface_policy = {"l3out": {"name": "l3out2",
                                                  "tenant": "intersite-testsuite"}}
                    site_policy['site']['interfaces'].append(interface_policy)
                # NOTE(review): the exact indentation of this append is ambiguous in
                # the source; placed after the loop since appending to
                # 'remote_sites' while iterating it would grow the list during
                # iteration. Confirm against upstream — it also re-appends an
                # element already present in the list.
                policy['export']['remote_sites'].append(site_policy)
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        mac = '00:11:22:33:33:33'
        ip = '3.4.3.4'
        time.sleep(2)
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out1', 'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out2', 'intersite-testsuite-app-epg'))
        time.sleep(2)
        self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out1', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out2', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out1',
                                                           'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out2',
                                                           'intersite-testsuite-app-epg'))
        # Shrink back to l3out2 only
        config = self.create_config_file('l3out2')
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(4)
        self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out1',
                                                            'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite', 'l3out2',
                                                           'intersite-testsuite-app-epg'))
        self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                           'l3out1', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
                                                          'l3out2', 'intersite-testsuite-app-epg'))

    def test_basic_add_multiple_endpoint(self):
        """ Test adding multiple endopoints """
        args = self.get_args()
        config = self.create_config_file('l3out1')
        self.write_config_file(config, args)
        collector = execute_tool(args, test_mode=True)
        time.sleep(2)
        mac1 = '00:11:22:33:33:34'
        ip1 = '3.4.3.5'
        self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')
        mac2 = '00:11:22:33:33:35'
        ip2 = '3.4.3.6'
        self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',
                                                          'l3out1', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',
                                                          'l3out1', 'intersite-testsuite-app-epg'))
        config = self.create_config_file('l3out2')
        self.write_config_file(config, args)
        collector.reload_config()
        time.sleep(2)
        self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',
                                                          'l3out2', 'intersite-testsuite-app-epg'))
        self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',
                                                          'l3out2', 'intersite-testsuite-app-epg'))


# test basic install of a single EPG and 1 endpoint being pushed to other site
# test remove EPG from policy and that


class TestDuplicates(BaseTestCase):
    """ Test duplicate existing entry on the remote site """

    def create_config_file(self):
        """ Create the configuration file
        :return: dictionary containing the configuration
        """
        config = self.create_site_config()
        export_policy = {
            "export": {
                "tenant": "intersite-testsuite-local",
                "app": "app",
                "epg": "epg",
                "remote_epg": "intersite-testsuite-app-epg",
                "remote_sites": [
                    {
                        "site": {
                            "name": "Site2",
                            "interfaces": [
                                {
                                    "l3out": {
                                        "name": "l3out",
                                        "tenant": "intersite-testsuite-remote"
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        }
        config['config'].append(export_policy)
        return config

    def setup_local_site(self):
        """ Set up the local site """
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite-local')
        app = AppProfile('app', tenant)
        epg = EPG('epg', app)
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def setup_remote_site(self):
        """ Set up the remote site """
        # Create tenant, L3out with contract on site 2
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite-remote')
        l3out = OutsideL3('l3out', tenant)
        epg = OutsideEPG('intersite-testsuite-app-epg', l3out)
        # 'other' EPG is where the duplicate entries get planted by the tests
        other_epg = OutsideEPG('other', l3out)
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def teardown_local_site(self):
        """ Tear down the local site """
        site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)
        resp = site1.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite-local')
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(site1)
        self.assertTrue(resp.ok)

    def teardown_remote_site(self):
        """ Tear down the remote site """
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite-remote')
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def add_remote_duplicate_entry(self, ip):
        """ Add a remote entry
        :param ip: String containing the IP address
        :return: None
        """
        site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
        resp = site2.login()
        self.assertTrue(resp.ok)
        tenant = Tenant('intersite-testsuite-remote')
        l3out = OutsideL3('l3out', tenant)
        other_epg = OutsideEPG('other', l3out)
        subnet = OutsideNetwork(ip, other_epg)
        subnet.ip = ip + '/32'
        resp = tenant.push_to_apic(site2)
        self.assertTrue(resp.ok)

    def test_basic_duplicate(self):
        """ Test a basic duplicate entry scenario.  An existing entry exists
            on the remote site but on a different OutsideEPG on the same OutsideL3.
""" args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_multiple_duplicate(self): """ Test a basic multiple duplicate entry scenario. """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) def test_basic_partial_duplicate(self): """ Test a basic multiple duplicate entry scenario where some of the entries in the set being added are duplicate. """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) for i in range(0, 7): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' 
+ str(i) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) for i in range(4, 9): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) for i in range(4, 9): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg')) class SetupDuplicateTests(BaseTestCase): """ Base class to setup the duplicate tests """ def create_config_file(self): """ Create the configuration file :return: dictionary containing the configuration """ config = self.create_site_config() export_policy = { "export": { "tenant": "intersite-testsuite-local", "app": "app", "epg": "epg", "remote_epg": "intersite-testsuite-app-epg", "remote_sites": [ { "site": { "name": "Site2", "interfaces": [ { "l3out": { "name": "l3out1", "tenant": "intersite-testsuite-remote" } }, { "l3out": { "name": "l3out2", "tenant": "intersite-testsuite-remote" } } ] } } ] } } config['config'].append(export_policy) return config def setup_local_site(self): """ Set up the local site """ site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-local') app = AppProfile('app', tenant) epg = EPG('epg', app) resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def setup_remote_site(self): """ Set up the remote site """ # Create tenant, L3out with contract on site 2 site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-remote') l3out1 = OutsideL3('l3out1', tenant) l3out2 = OutsideL3('l3out2', tenant) epg1 = OutsideEPG('intersite-testsuite-app-epg', l3out1) other_epg = OutsideEPG('other', l3out1) epg2 = 
OutsideEPG('intersite-testsuite-app-epg', l3out2) resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def teardown_local_site(self): """ Tear down the local site """ site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD) resp = site1.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-local') tenant.mark_as_deleted() resp = tenant.push_to_apic(site1) self.assertTrue(resp.ok) def teardown_remote_site(self): """ Tear down the remote site """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-remote') tenant.mark_as_deleted() resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) class TestDuplicatesTwoL3Outs(SetupDuplicateTests): """ Test duplicate entries with 2 OutsideL3 interfaces on the remote site """ def add_remote_duplicate_entry(self, ip): """ Add a remote entry :param ip: String containing the IP address :return: None """ site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tenant = Tenant('intersite-testsuite-remote') l3out = OutsideL3('l3out1', tenant) other_epg = OutsideEPG('other', l3out) subnet = OutsideNetwork(ip, other_epg) subnet.ip = ip + '/32' resp = tenant.push_to_apic(site2) self.assertTrue(resp.ok) def test_basic_duplicate(self): """ Test a basic duplicate entry scenario. An existing entry exists on the remote site but on a different OutsideEPG on the same OutsideL3. 
""" args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) mac = '00:11:22:33:33:33' ip = '3.4.3.4' self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') mac2 = '00:11:22:33:33:44' ip2 = '3.4.3.44' self.add_endpoint(mac2, ip2, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) def test_basic_multiple_duplicate(self): """ Test a basic multiple duplicate entry scenario. """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) for i in range(0, 5): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' 
+ str(i) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) def test_basic_partial_duplicate(self): """ Test a basic multiple duplicate entry scenario where some of the entries in the set being added are duplicate. """ args = self.get_args() config = self.create_config_file() self.write_config_file(config, args) execute_tool(args, test_mode=True) for i in range(0, 7): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) self.add_remote_duplicate_entry(ip) time.sleep(2) for i in range(4, 9): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' + str(i) self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg') time.sleep(2) for i in range(4, 9): mac = '00:11:22:33:33:3' + str(i) ip = '3.4.3.' 
+ str(i) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out1', 'intersite-testsuite-app-epg')) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out2', 'intersite-testsuite-app-epg')) class TestDeletions(BaseEndpointTestCase): """ Tests for deletion of stale entries """ def test_basic_deletion(self): """ Test basic deletion of a stale entry on tool startup :return: """ args = self.get_args() config_filename = 'testsuite_cfg.json' args.config = config_filename config = self.create_config_file() config_file = open(config_filename, 'w') config_file.write(str(json.dumps(config))) config_file.close() # Create the "stale" entry on the remote site mac = '00:11:22:33:33:33' ip = '3.4.3.4' site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD) resp = site2.login() self.assertTrue(resp.ok) tag = IntersiteTag('intersite-testsuite', 'app', 'epg', 'Site1') remote_tenant = Tenant('intersite-testsuite') remote_l3out = OutsideL3('l3out', remote_tenant) remote_epg = OutsideEPG('intersite-testsuite-app-epg', remote_l3out) remote_ep = OutsideNetwork(ip, remote_epg) remote_ep.ip = ip + '/32' remote_tenant.push_to_apic(site2) time.sleep(2) self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) execute_tool(args, test_mode=True) time.sleep(2) self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out', 'intersite-testsuite-app-epg')) class TestCli(BaseTestCase): """ Tests for the CLI """ def setup_remote_site(self): """ Set up the remote site. """ pass def setup_local_site(self): """ Set up the local site. 
""" pass def teardown_local_site(self): """ Teardown the local site configuration """ pass def teardown_remote_site(self): """ Teardown the remote site configuration """ pass def _create_commandline(self): """ Internal function to create a CommandLine instance """ args = self.get_args() self.write_config_file(self.create_site_config(), args) cmdline = CommandLine(execute_tool(args, test_mode=True)) self.assertTrue(isinstance(cmdline, CommandLine)) return cmdline def _test_show_cmd(self, cmd, output): """ Internal common function for checking show commands :param cmd: String containing show command keyword :param output: List of strings to compare with the command output """ cmdline = self._create_commandline() temp = sys.stdout fake_out = FakeStdio() sys.stdout = fake_out cmdline.do_show(cmd) sys.stdout = temp self.assertTrue(fake_out.verify_output(output)) def test_show_debug(self): """ Test show debug command """ self._test_show_cmd('debug', ['Debug level currently set to: CRITICAL', '\n']) def test_show_configfile(self): """ Test show configfile command """ self._test_show_cmd('configfile', ['Configuration file is set to: testsuite_cfg.json', '\n']) def test_show_config(self): """ Test show config command """ self._test_show_cmd('config', [json.dumps(self.create_site_config(), indent=4, separators=(',', ':')), '\n']) def test_show_sites(self): """ Test show sites command """ self._test_show_cmd('sites', [u'Site1', ' ', ':', ' ', 'Connected', '\n', u'Site2', ' ', ':', ' ', 'Connected', '\n']) def test_show_stats(self): """ Test show stats command """ self._test_show_cmd('stats', ['Endpoint addition events: 0', '\n', 'Endpoint deletion events: 0', '\n']) def main_test(): """ Main execution routine. Create the test suites and run. 
""" full = unittest.TestSuite() full.addTest(unittest.makeSuite(TestToolOptions)) full.addTest(unittest.makeSuite(TestBadConfiguration)) full.addTest(unittest.makeSuite(TestBasicEndpoints)) full.addTest(unittest.makeSuite(TestMultipleEPG)) full.addTest(unittest.makeSuite(TestBasicExistingEndpoints)) full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater)) full.addTest(unittest.makeSuite(TestExportPolicyRemoval)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithProvidedContract)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithConsumedContract)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithConsumedContractInterface)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithTaboo)) full.addTest(unittest.makeSuite(TestBasicEndpointMove)) full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract)) full.addTest(unittest.makeSuite(TestChangeL3Out)) full.addTest(unittest.makeSuite(TestDuplicates)) full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs)) full.addTest(unittest.makeSuite(TestDeletions)) full.addTest(unittest.makeSuite(TestCli)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithMultipleRemoteSites)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithMultipleRemoteSitesButOnlyExportToOne)) full.addTest(unittest.makeSuite(TestBasicEndpointsWithThreeRemoteSites)) unittest.main() if __name__ == '__main__': try: main_test() except KeyboardInterrupt: pass
38.430521
138
0.52422
14,003
140,771
5.085767
0.036992
0.113738
0.063988
0.057628
0.84825
0.831063
0.810281
0.798514
0.78106
0.763606
0
0.02532
0.373514
140,771
3,662
139
38.441016
0.7822
0.112353
0
0.728501
0
0.002048
0.161722
0.060317
0
0
0
0
0.107289
1
0.072072
false
0.03317
0.006143
0
0.110565
0.000819
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7c37bdf6f75ec9ca42d9532bcd9052dbb3b16b40
803
py
Python
tests/test_ping.py
libero/search
f13c7fe2aa5f3cd1e2f62234995788bed7147b91
[ "MIT" ]
null
null
null
tests/test_ping.py
libero/search
f13c7fe2aa5f3cd1e2f62234995788bed7147b91
[ "MIT" ]
14
2019-01-31T08:34:30.000Z
2019-11-21T10:06:13.000Z
tests/test_ping.py
giorgiosironi/search
4a117c88c59627041c2058c9a41b69b01e2f3fcc
[ "MIT" ]
3
2019-01-30T10:49:01.000Z
2019-06-11T14:42:03.000Z
def test_http_1_0_ping_response(client) -> None: response = client.get('/ping', environ_overrides={'SERVER_PROTOCOL': 'HTTP/1.0'}) assert response.status_code == 200 assert response.content_type == 'text/plain; charset=utf-8' assert response.data == b'pong' assert response.headers['Cache-Control'] == 'no-store, must-revalidate' assert response.headers['Expires'] == '0' def test_http_1_1_ping_response(client) -> None: response = client.get('/ping', environ_overrides={'SERVER_PROTOCOL': 'HTTP/1.1'}) assert response.status_code == 200 assert response.content_type == 'text/plain; charset=utf-8' assert response.data == b'pong' assert response.headers['Cache-Control'] == 'no-store, must-revalidate' assert response.headers.get('Expires') is None
38.238095
85
0.704857
108
803
5.074074
0.342593
0.255474
0.153285
0.043796
0.905109
0.905109
0.905109
0.905109
0.905109
0.905109
0
0.024745
0.144458
803
20
86
40.15
0.772926
0
0
0.571429
0
0
0.25593
0
0
0
0
0
0.714286
1
0.142857
false
0
0
0
0.142857
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
7c415a354fe892db7aeed45962a7bfcf91437986
31,191
py
Python
closed/Lenovo/configs/3d-unet/Offline/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
12
2021-09-23T08:05:57.000Z
2022-03-21T03:52:11.000Z
closed/Lenovo/configs/3d-unet/Offline/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
11
2021-09-23T20:34:06.000Z
2022-01-22T07:58:02.000Z
closed/Lenovo/configs/3d-unet/Offline/__init__.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
16
2021-09-23T20:26:38.000Z
2022-03-09T12:59:56.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.getcwd()) from importlib import import_module from code.common.constants import Benchmark, Scenario from code.common.system_list import System, Architecture, MIGConfiguration, MIGSlice from configs.configuration import * ParentConfig = import_module("configs.3d-unet") GPUBaseConfig = ParentConfig.GPUBaseConfig CPUBaseConfig = ParentConfig.CPUBaseConfig class OfflineGPUBaseConfig(GPUBaseConfig): scenario = Scenario.Offline gpu_inference_streams = 1 gpu_copy_streams = 2 class OfflineCPUBaseConfig(CPUBaseConfig): scenario = Scenario.Offline @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GBx1(OfflineGPUBaseConfig): system = System("A100-PCIe-80GB", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 53 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GBx1_HighAccuracy(A100_PCIe_80GBx1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GBx1_Triton(A100_PCIe_80GBx1): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GBx1_HighAccuracy_Triton(A100_PCIe_80GBx1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class 
A100_PCIe_80GBx8(A100_PCIe_80GBx1): system = System("A100-PCIe-80GB", Architecture.Ampere, 8) gpu_batch_size = 2 offline_expected_qps = 412 numa_config = "3:0-15&2:16-31&1:32-47&0:48-63&7:64-79&6:80-95&5:96-111&4:112-127" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GBx8_HighAccuracy(A100_PCIe_80GBx8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GBx8_Triton(A100_PCIe_80GBx8): input_dtype = "fp16" input_format = "dhwc8" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_dhwc8" use_triton = True output_pinned_memory = False @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GBx8_HighAccuracy_Triton(A100_PCIe_80GBx8_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_PCIe_80GBx8_MaxQ(A100_PCIe_80GBx8): gpu_batch_size = 2 offline_expected_qps = 370 power_limit = 175 numa_config = "3:0-7&2:8-15&1:16-23&0:24-31&7:32-39&6:40-47&5:48-55&4:56-63" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_PCIe_80GBx8_HighAccuracy_MaxQ(A100_PCIe_80GBx8_MaxQ): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_PCIe_80GBx8_Triton_MaxQ(A100_PCIe_80GBx8_MaxQ): use_triton = True offline_expected_qps = 412 @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_PCIe_80GBx8_HighAccuracy_Triton_MaxQ(A100_PCIe_80GBx8_Triton_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GB_aarch64x1(OfflineGPUBaseConfig): system = System("A100-PCIe-80GB", Architecture.Ampere, 1, cpu_arch=CPUArch.aarch64) gpu_batch_size = 2 offline_expected_qps = 53 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class 
A100_PCIe_80GB_aarch64x1_HighAccuracy(A100_PCIe_80GB_aarch64x1): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GB_aarch64x2(OfflineGPUBaseConfig): system = System("A100-PCIe-80GB", Architecture.Ampere, 2, cpu_arch=CPUArch.aarch64) gpu_batch_size = 2 offline_expected_qps = 106 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GB_aarch64x2_HighAccuracy(A100_PCIe_80GB_aarch64x2): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_80GB_aarch64x4(OfflineGPUBaseConfig): system = System("A100-PCIe-80GB", Architecture.Ampere, 4, cpu_arch=CPUArch.aarch64) gpu_batch_size = 2 offline_expected_qps = 210 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_80GB_aarch64x4_HighAccuracy(A100_PCIe_80GB_aarch64x4): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_PCIe_80GB_aarch64x4_MaxQ(OfflineGPUBaseConfig): system = System("A100-PCIe-80GB", Architecture.Ampere, 4, cpu_arch=CPUArch.aarch64) gpu_batch_size = 2 # TODO: Set power_limit properly power_limit = 200 offline_expected_qps = 185 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_PCIe_80GB_aarch64x4_HighAccuracy_MaxQ(A100_PCIe_80GB_aarch64x4_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_MIG_1x1g5gb(OfflineGPUBaseConfig): _mig_configuration = MIGConfiguration({0: {MIGSlice(1, 5): 1}}) system = System("A100-PCIe", Architecture.Ampere, 1, mig_conf=_mig_configuration) input_dtype = "fp16" input_format = "linear" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_linear" workspace_size = 1073741824 gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 7 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class 
A100_PCIe_MIG_1x1g5gb_HighAccuracy(A100_PCIe_MIG_1x1g5gb): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIe_MIG_1x1g5gb_Triton(A100_PCIe_MIG_1x1g5gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIe_MIG_1x1g5gb_HighAccuracy_Triton(A100_PCIe_MIG_1x1g5gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIex1(OfflineGPUBaseConfig): system = System("A100-PCIe", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 53 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIex1_HighAccuracy(A100_PCIex1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIex1_Triton(A100_PCIex1): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIex1_HighAccuracy_Triton(A100_PCIex1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIex8(A100_PCIex1): system = System("A100-PCIe", Architecture.Ampere, 8) gpu_batch_size = 2 offline_expected_qps = 412 numa_config = "3:0-15&2:16-31&1:32-47&0:48-63&7:64-79&6:80-95&5:96-111&4:112-127" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIex8_HighAccuracy(A100_PCIex8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_PCIex8_Triton(A100_PCIex8): input_dtype = "fp16" input_format = "dhwc8" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_dhwc8" use_triton = True output_pinned_memory = False @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_PCIex8_HighAccuracy_Triton(A100_PCIex8_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) 
class A100_PCIex8_MaxQ(A100_PCIex8): gpu_batch_size = 2 offline_expected_qps = 370 power_limit = 175 numa_config = "3:0-7&2:8-15&1:16-23&0:24-31&7:32-39&6:40-47&5:48-55&4:56-63" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_PCIex8_HighAccuracy_MaxQ(A100_PCIex8_MaxQ): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_PCIex8_Triton_MaxQ(A100_PCIex8_MaxQ): gpu_batch_size = 2 offline_expected_qps = 412 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_PCIex8_HighAccuracy_Triton_MaxQ(A100_PCIex8_Triton_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GB_MIG_1x1g10gb(OfflineGPUBaseConfig): _mig_configuration = MIGConfiguration({0: {MIGSlice(1, 10): 1}}) system = System("A100-SXM-80GB", Architecture.Ampere, 1, mig_conf=_mig_configuration) input_dtype = "fp16" input_format = "linear" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_linear" workspace_size = 1073741824 gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 7 start_from_device = True @ConfigRegistry.register(HarnessType.HeteroMIG, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GB_MIG_1x1g10gb_Hetero(A100_SXM_80GB_MIG_1x1g10gb): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GB_MIG_1x1g10gb_HighAccuracy(A100_SXM_80GB_MIG_1x1g10gb): pass @ConfigRegistry.register(HarnessType.HeteroMIG, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GB_MIG_1x1g10gb_Hetero_HighAccuracy(A100_SXM_80GB_MIG_1x1g10gb_HighAccuracy): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GB_MIG_1x1g10gb_Triton(A100_SXM_80GB_MIG_1x1g10gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class 
A100_SXM_80GB_MIG_1x1g10gb_HighAccuracy_Triton(A100_SXM_80GB_MIG_1x1g10gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GB_MIG_56x1g10gb(A100_SXM_80GB_MIG_1x1g10gb): _mig_configuration = MIGConfiguration({ 0: {MIGSlice(1, 10): 7}, 1: {MIGSlice(1, 10): 7}, 2: {MIGSlice(1, 10): 7}, 3: {MIGSlice(1, 10): 7}, 4: {MIGSlice(1, 10): 7}, 5: {MIGSlice(1, 10): 7}, 6: {MIGSlice(1, 10): 7}, 7: {MIGSlice(1, 10): 7}, }) system = System("A100-SXM-80GB", Architecture.Ampere, 8, mig_conf=_mig_configuration) gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 392 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GB_MIG_56x1g10gb_HighAccuracy(A100_SXM_80GB_MIG_56x1g10gb): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GB_MIG_56x1g10gb_Triton(A100_SXM_80GB_MIG_56x1g10gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GB_MIG_56x1g10gb_HighAccuracy_Triton(A100_SXM_80GB_MIG_56x1g10gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GBx1(OfflineGPUBaseConfig): system = System("A100-SXM-80GB", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 60 start_from_device = True @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx1_HighAccuracy(A100_SXM_80GBx1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GBx1_Triton(A100_SXM_80GBx1): instance_group_count = 1 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx1_HighAccuracy_Triton(A100_SXM_80GBx1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class 
A100_SXM_80GBx4(OfflineGPUBaseConfig): _system_alias = "DGX Station A100 - Red October" _notes = "This should not inherit from A100_SXM_80GB (DGX-A100), and cannot use start_from_device" system = System("A100-SXM-80GB", Architecture.Ampere, 4) gpu_batch_size = 2 offline_expected_qps = 220 numa_config = "3:0-15,64-79&2:16-31,80-95&1:32-47,96-111&0:48-63,112-127" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx4_HighAccuracy(A100_SXM_80GBx4): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GBx4_Triton(A100_SXM_80GBx4): instance_group_count = 1 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx4_HighAccuracy_Triton(A100_SXM_80GBx4_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM_80GBx4_MaxQ(A100_SXM_80GBx4): gpu_batch_size = 2 offline_expected_qps = 220 power_limit = 225 numa_config = "3:0-7,32-39&2:8-15,40-47&1:16-23,48-55&0:24-31,56-63" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM_80GBx4_HighAccuracy_MaxQ(A100_SXM_80GBx4_MaxQ): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM_80GBx4_Triton_MaxQ(A100_SXM_80GBx4_MaxQ): numa_config = "" # TODO: Artifact from old configs. Should Red October Triton use numa_config? 
instance_group_count = 1 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM_80GBx4_HighAccuracy_Triton_MaxQ(A100_SXM_80GBx4_Triton_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GBx8(A100_SXM_80GBx1): system = System("A100-SXM-80GB", Architecture.Ampere, 8) gpu_batch_size = 2 offline_expected_qps = 480 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx8_HighAccuracy(A100_SXM_80GBx8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM_80GBx8_Triton(A100_SXM_80GBx8): use_graphs = True instance_group_count = 4 use_triton = True output_pinned_memory = False @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM_80GBx8_HighAccuracy_Triton(A100_SXM_80GBx8_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM_80GBx8_MaxQ(A100_SXM_80GBx8): gpu_batch_size = 2 offline_expected_qps = 480 power_limit = 225 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM_80GBx8_HighAccuracy_MaxQ(A100_SXM_80GBx8_MaxQ): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM_80GBx8_Triton_MaxQ(A100_SXM_80GBx8_MaxQ): instance_group_count = 2 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM_80GBx8_HighAccuracy_Triton_MaxQ(A100_SXM_80GBx8_Triton_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GB_MIG_1x1g5gb(OfflineGPUBaseConfig): _mig_configuration = MIGConfiguration({0: {MIGSlice(1, 5): 1}}) system = System("A100-SXM4-40GB", Architecture.Ampere, 1, mig_conf=_mig_configuration) input_dtype = "fp16" input_format = "linear" 
tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_linear" workspace_size = 1073741824 gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 7 start_from_device = True @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GB_MIG_1x1g5gb_HighAccuracy(A100_SXM4_40GB_MIG_1x1g5gb): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GB_MIG_1x1g5gb_Triton(A100_SXM4_40GB_MIG_1x1g5gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GB_MIG_1x1g5gb_HighAccuracy_Triton(A100_SXM4_40GB_MIG_1x1g5gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GBx1(OfflineGPUBaseConfig): system = System("A100-SXM4-40GB", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 60 start_from_device = True @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GBx1_HighAccuracy(A100_SXM4_40GBx1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GBx1_Triton(A100_SXM4_40GBx1): instance_group_count = 1 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GBx1_HighAccuracy_Triton(A100_SXM4_40GBx1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GBx8(A100_SXM4_40GBx1): system = System("A100-SXM4-40GB", Architecture.Ampere, 8) offline_expected_qps = 480 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GBx8_HighAccuracy(A100_SXM4_40GBx8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A100_SXM4_40GBx8_Triton(A100_SXM4_40GBx8): instance_group_count = 4 use_triton = True 
@ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A100_SXM4_40GBx8_HighAccuracy_Triton(A100_SXM4_40GBx8_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM4_40GBx8_MaxQ(A100_SXM4_40GBx8): power_limit = 225 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM4_40GBx8_HighAccuracy_MaxQ(A100_SXM4_40GBx8_MaxQ): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxQ) class A100_SXM4_40GBx8_Triton_MaxQ(A100_SXM4_40GBx8_MaxQ): instance_group_count = 2 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class A100_SXM4_40GBx8_HighAccuracy_Triton_MaxQ(A100_SXM4_40GBx8_Triton_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A10x1(OfflineGPUBaseConfig): system = System("A10", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 22 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A10x1_HighAccuracy(A10x1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A10x1_Triton(A10x1): gpu_batch_size = 2 offline_expected_qps = 20 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A10x1_HighAccuracy_Triton(A10x1_Triton): offline_expected_qps = 22 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A10x8(A10x1): system = System("A10", Architecture.Ampere, 8) offline_expected_qps = 170 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A10x8_HighAccuracy(A10x8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A10x8_Triton(A10x8): gpu_batch_size = 2 offline_expected_qps = 160.0 use_triton = True 
@ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A10x8_HighAccuracy_Triton(A10x8_Triton): offline_expected_qps = 170 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A30_MIG_1x1g6gb(OfflineGPUBaseConfig): _mig_configuration = MIGConfiguration({0: {MIGSlice(1, 6): 1}}) system = System("A30", Architecture.Ampere, 1, mig_conf=_mig_configuration) input_dtype = "fp16" input_format = "linear" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_linear" workspace_size = 805306368 gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 7.55 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30_MIG_1x1g6gb_HighAccuracy(A30_MIG_1x1g6gb): pass @ConfigRegistry.register(HarnessType.HeteroMIG, AccuracyTarget.k_99, PowerSetting.MaxP) class A30_MIG_1x1g6gb_Hetero(A30_MIG_1x1g6gb): offline_expected_qps = 6.847 @ConfigRegistry.register(HarnessType.HeteroMIG, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30_MIG_1x1g6gb_Hetero_HighAccuracy(A30_MIG_1x1g6gb_Hetero): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A30_MIG_1x1g6gb_Triton(A30_MIG_1x1g6gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30_MIG_1x1g6gb_HighAccuracy_Triton(A30_MIG_1x1g6gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A30_MIG_32x1g6gb(OfflineGPUBaseConfig): _mig_configuration = MIGConfiguration({ 0: {MIGSlice(1, 6): 4}, 1: {MIGSlice(1, 6): 4}, 2: {MIGSlice(1, 6): 4}, 3: {MIGSlice(1, 6): 4}, 4: {MIGSlice(1, 6): 4}, 5: {MIGSlice(1, 6): 4}, 6: {MIGSlice(1, 6): 4}, 7: {MIGSlice(1, 6): 4}, }) system = System("A30", Architecture.Ampere, 8, mig_conf=_mig_configuration) input_dtype = "fp16" input_format = "linear" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_linear" workspace_size = 805306368 
gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 224 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30_MIG_32x1g6gb_HighAccuracy(A30_MIG_32x1g6gb): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A30_MIG_32x1g6gb_Triton(A30_MIG_32x1g6gb): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30_MIG_32x1g6gb_HighAccuracy_Triton(A30_MIG_32x1g6gb_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A30x1(OfflineGPUBaseConfig): system = System("A30", Architecture.Ampere, 1) gpu_batch_size = 2 offline_expected_qps = 30.74 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30x1_HighAccuracy(A30x1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A30x1_Triton(A30x1): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30x1_HighAccuracy_Triton(A30x1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class A30x8(A30x1): system = System("A30", Architecture.Ampere, 8) offline_expected_qps = 230 numa_config = "3:0-15&2:16-31&1:32-47&0:48-63&7:64-79&6:80-95&5:96-111&4:112-127" @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30x8_HighAccuracy(A30x8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class A30x8_Triton(A30x8): input_dtype = "fp16" input_format = "dhwc8" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_dhwc8" gpu_batch_size = 2 offline_expected_qps = 230 use_triton = True output_pinned_memory = False @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class A30x8_HighAccuracy_Triton(A30x8_Triton): pass 
@ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class AGX_Xavier(OfflineGPUBaseConfig): system = System("AGX_Xavier", Architecture.Xavier, 1, cpu_arch=CPUArch.aarch64) gpu_batch_size = 1 offline_expected_qps = 3 use_direct_host_access = True @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class AGX_Xavier_HighAccuracy(AGX_Xavier): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class AGX_Xavier_Triton(AGX_Xavier): offline_expected_qps = 3 use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class AGX_Xavier_HighAccuracy_Triton(AGX_Xavier_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class AGX_Xavier_MaxQ(AGX_Xavier): offline_expected_qps = 2.1 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class AGX_Xavier_HighAccuracy_MaxQ(AGX_Xavier_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class Xavier_NX(OfflineGPUBaseConfig): system = System("Xavier_NX", Architecture.Xavier, 1, cpu_arch=CPUArch.aarch64) input_dtype = "fp16" input_format = "dhwc8" tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp16_dhwc8" workspace_size = 1073741824 gpu_batch_size = 1 gpu_copy_streams = 1 offline_expected_qps = 1.5 use_direct_host_access = True @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class Xavier_NX_HighAccuracy(Xavier_NX): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class Xavier_NX_Triton(Xavier_NX): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class Xavier_NX_HighAccuracy_Triton(Xavier_NX_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxQ) class Xavier_NX_MaxQ(Xavier_NX): pass 
@ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxQ) class Xavier_NX_HighAccuracy_MaxQ(Xavier_NX_MaxQ): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x1(OfflineGPUBaseConfig): system = System("T4", Architecture.Turing, 1) gpu_batch_size = 2 offline_expected_qps = 8 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x1_HighAccuracy(T4x1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x1_Triton(T4x1): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x1_HighAccuracy_Triton(T4x1_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x20(T4x1): system = System("T4", Architecture.Turing, 20) offline_expected_qps = 160 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x20_HighAccuracy(T4x20): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x20_Triton(T4x20): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x20_HighAccuracy_Triton(T4x20_Triton): pass @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x8(T4x1): system = System("T4", Architecture.Turing, 8) offline_expected_qps = 64 @ConfigRegistry.register(HarnessType.LWIS, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x8_HighAccuracy(T4x8): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class T4x8_Triton(T4x8): use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class T4x8_HighAccuracy_Triton(T4x8_Triton): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class Triton_CPU_2S_8360Yx1(OfflineCPUBaseConfig): 
system = System("Triton_CPU_2S_8360Y", Architecture.Intel_CPU_x86_64, 1) precision = "fp32" offline_expected_qps = 6 batch_size = 0 input_dtype = "fp32" max_queue_delay_usec = 100 model_name = "3dunet_int8_openvino" num_instances = 16 ov_parameters = { 'CPU_THREADS_NUM': '72', 'CPU_THROUGHPUT_STREAMS': '8', 'ENABLE_BATCH_PADDING': 'NO', 'SKIP_OV_DYNAMIC_BATCHSIZE': 'YES' } tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp32" use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class Triton_CPU_2S_8360Yx1_HighAccuracy(Triton_CPU_2S_8360Yx1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class Triton_CPU_2S_6258Rx1(OfflineCPUBaseConfig): system = System("Triton_CPU_2S_6258R", Architecture.Intel_CPU_x86_64, 1) precision = "fp32" offline_expected_qps = 4 batch_size = 0 input_dtype = "fp32" max_queue_delay_usec = 100 model_name = "3dunet_int8_openvino" num_instances = 16 ov_parameters = { 'CPU_THREADS_NUM': '56', 'CPU_THROUGHPUT_STREAMS': '8', 'ENABLE_BATCH_PADDING': 'NO', 'SKIP_OV_DYNAMIC_BATCHSIZE': 'YES' } tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp32" use_triton = True @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class Triton_CPU_2S_6258Rx1_HighAccuracy(Triton_CPU_2S_6258Rx1): pass @ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99, PowerSetting.MaxP) class Triton_CPU_4S_8380Hx1(OfflineCPUBaseConfig): system = System("Triton_CPU_4S_8380H", Architecture.Intel_CPU_x86_64, 1) precision = "fp32" offline_expected_qps = 10 batch_size = 0 input_dtype = "fp32" max_queue_delay_usec = 100 model_name = "3dunet_int8_openvino" num_instances = 32 ov_parameters = { 'CPU_THREADS_NUM': '112', 'CPU_THROUGHPUT_STREAMS': '16', 'ENABLE_BATCH_PADDING': 'NO', 'SKIP_OV_DYNAMIC_BATCHSIZE': 'YES' } tensor_path = "${PREPROCESSED_DATA_DIR}/brats/brats_npy/fp32" use_triton = True 
@ConfigRegistry.register(HarnessType.Triton, AccuracyTarget.k_99_9, PowerSetting.MaxP) class Triton_CPU_4S_8380Hx1_HighAccuracy(Triton_CPU_4S_8380Hx1): pass
33.288154
102
0.791158
4,057
31,191
5.759428
0.07296
0.129932
0.194899
0.110845
0.871223
0.842421
0.817641
0.788967
0.762818
0.737824
0
0.091418
0.117277
31,191
936
103
33.323718
0.757237
0.022122
0
0.636364
0
0.010972
0.062818
0.038183
0
0
0
0.001068
0
1
0
false
0.10815
0.010972
0
0.619122
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
9
7c5d7308007377173b306ce693d2732486aec3e8
81
py
Python
python/testData/copyPaste/SelectionReverse3.after.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2018-12-29T09:53:39.000Z
2018-12-29T09:53:42.000Z
python/testData/copyPaste/SelectionReverse3.after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/copyPaste/SelectionReverse3.after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
if True: a = 1 b = 2 def f(): if True: a = 1 b = 2
8.1
13
0.296296
14
81
1.714286
0.571429
0.5
0.583333
0.666667
0.833333
0.833333
0
0
0
0
0
0.121212
0.592593
81
9
14
9
0.606061
0
0
0.857143
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.142857
0
1
0
1
null
1
1
1
1
1
0
0
0
0
0
0
1
0
1
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
7ca61dbbfbdc935dbff3557cc81726494f278882
68,606
py
Python
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_heteroFair/cmp_cactusADM/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_heteroFair/cmp_cactusADM/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_heteroFair/cmp_cactusADM/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.191274, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.352924, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 1.21162, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.427463, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996, 'Execution 
Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.740212, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.424532, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.59221, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.236773, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 7.40368, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.2289, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0154959, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution 
Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.1764, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.114602, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.4053, 'Execution Unit/Register Files/Runtime Dynamic': 0.130097, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.478657, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.24384, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 3.72444, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 4.21395e-05, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold 
Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 4.21395e-05, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.64365e-05, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.39591e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch 
Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00164626, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00176698, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000413568, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.110169, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.271146, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch 
Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.374185, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96874, 'Instruction Fetch Unit/Runtime Dynamic': 0.757681, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0784567, 'L2/Runtime Dynamic': 0.0216862, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 5.88588, 'Load Store Unit/Data Cache/Runtime Dynamic': 2.25897, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.150398, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.150398, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 6.59898, 'Load Store Unit/Runtime Dynamic': 3.15108, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.370857, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.741714, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 
0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.131618, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.132765, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0445451, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.786036, 'Memory Management Unit/Runtime Dynamic': 0.17731, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 28.3976, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.798581, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.0314677, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End RAT/Area': 0.114751, 'Renaming 
Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.207702, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 1.03775, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 8.86995, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0820926, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.267168, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.519928, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.158611, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction 
Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.255834, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.129136, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.543581, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.101693, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.894, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution 
Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0982255, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00665286, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0757283, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0492019, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.173954, 'Execution Unit/Register Files/Runtime Dynamic': 0.0558548, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.180066, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.470888, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.74287, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak 
Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 1.72019e-05, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 1.72019e-05, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.49826e-05, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 5.79984e-06, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00070679, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000756176, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000164941, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0472991, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.00863, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.116385, 'Instruction 
Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.160649, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 5.37316, 'Instruction Fetch Unit/Runtime Dynamic': 0.325254, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0351162, 'L2/Runtime Dynamic': 0.00990718, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.23492, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.97174, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0646337, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0646336, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.54014, 'Load Store Unit/Runtime Dynamic': 1.35512, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 
0.159376, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.318751, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.056563, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0570774, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.187066, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0191178, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.44034, 'Memory Management Unit/Runtime Dynamic': 0.0761952, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 17.8722, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.258386, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0103006, 'Renaming 
Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0759781, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.344665, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 3.85401, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0816004, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.266781, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.516846, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak 
Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.157709, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.254379, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.128402, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.540489, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.101133, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak 
Dynamic': 4.88771, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0976433, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00661502, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0752872, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0489221, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.17293, 'Execution Unit/Register Files/Runtime Dynamic': 0.0555371, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.179014, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.46804, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.73622, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch 
Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 1.89701e-05, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 1.89701e-05, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.65671e-05, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 
6.4376e-06, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00070277, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000757278, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000180304, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0470301, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.99152, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.115756, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.159735, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 5.35522, 'Instruction Fetch Unit/Runtime Dynamic': 0.323459, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.035218, 'L2/Runtime Dynamic': 0.0100071, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.22239, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.965885, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0642282, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0642282, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load 
Store Unit/Peak Dynamic': 3.52569, 'Load Store Unit/Runtime Dynamic': 1.34686, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.158376, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.316752, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0562081, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.056724, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.186002, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0190149, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.438666, 'Memory Management Unit/Runtime Dynamic': 0.0757389, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 17.832, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.256855, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 
'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0102413, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0755441, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.34264, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 3.83493, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0836494, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.26839, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.529876, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution 
Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.16157, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.260607, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.131546, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.553723, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.103553, 
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.91429, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.100105, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00677699, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0771461, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.05012, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.177251, 'Execution Unit/Register Files/Runtime Dynamic': 0.056897, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.183442, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.479492, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.76388, 
'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 1.92389e-05, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 1.92389e-05, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.68005e-05, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch 
Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.52749e-06, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000719978, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000775256, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00018291, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0481816, 
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.06477, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.118588, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.163646, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 5.43202, 'Instruction Fetch Unit/Runtime Dynamic': 0.331374, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0351213, 'L2/Runtime Dynamic': 0.00974348, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.27002, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.988267, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load 
Store Unit/LoadQ/Peak Dynamic': 0.0657692, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0657692, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.5806, 'Load Store Unit/Runtime Dynamic': 1.37839, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.162176, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.324351, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0575566, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0580698, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.190556, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0194831, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.445537, 'Memory Management Unit/Runtime Dynamic': 0.077553, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 17.997, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End 
RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.26333, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0104943, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0773874, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.351211, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 3.91215, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}], 'DRAM': {'Area': 0, 'Gate Leakage': 0, 'Peak Dynamic': 6.968709236359436, 'Runtime Dynamic': 6.968709236359436, 'Subthreshold Leakage': 4.252, 'Subthreshold Leakage with power gating': 4.252}, 'L3': [{'Area': 61.9075, 'Gate Leakage': 0.0484137, 'Peak Dynamic': 0.272973, 'Runtime Dynamic': 0.109611, 'Subthreshold Leakage': 6.80085, 'Subthreshold Leakage with power gating': 3.32364}], 'Processor': {'Area': 191.908, 'Gate Leakage': 1.53485, 'Peak Dynamic': 82.3718, 'Peak Power': 115.484, 'Runtime Dynamic': 20.5807, 'Subthreshold Leakage': 31.5774, 'Subthreshold Leakage with power gating': 13.9484, 'Total Cores/Area': 128.669, 'Total Cores/Gate Leakage': 1.4798, 'Total 
Cores/Peak Dynamic': 82.0988, 'Total Cores/Runtime Dynamic': 20.471, 'Total Cores/Subthreshold Leakage': 24.7074, 'Total Cores/Subthreshold Leakage with power gating': 10.2429, 'Total L3s/Area': 61.9075, 'Total L3s/Gate Leakage': 0.0484137, 'Total L3s/Peak Dynamic': 0.272973, 'Total L3s/Runtime Dynamic': 0.109611, 'Total L3s/Subthreshold Leakage': 6.80085, 'Total L3s/Subthreshold Leakage with power gating': 3.32364, 'Total Leakage': 33.1122, 'Total NoCs/Area': 1.33155, 'Total NoCs/Gate Leakage': 0.00662954, 'Total NoCs/Peak Dynamic': 0.0, 'Total NoCs/Runtime Dynamic': 0.0, 'Total NoCs/Subthreshold Leakage': 0.0691322, 'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
75.061269
124
0.681835
8,098
68,606
5.770561
0.067424
0.123604
0.11299
0.093473
0.939675
0.932356
0.91896
0.886882
0.863642
0.844404
0
0.131298
0.224339
68,606
914
125
75.061269
0.746838
0
0
0.642232
0
0
0.65744
0.0481
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7cb130c41763f05f26c638fca70585f0091b8c83
15,969
py
Python
env3.10/lib/python3.10/site-packages/pyramid/tests/test_scripts/test_pshell.py
slmaankhaan/todo_app
4e5a81a789e02be84525682f3ec5d0bfc3d91e8d
[ "MIT" ]
null
null
null
env3.10/lib/python3.10/site-packages/pyramid/tests/test_scripts/test_pshell.py
slmaankhaan/todo_app
4e5a81a789e02be84525682f3ec5d0bfc3d91e8d
[ "MIT" ]
null
null
null
env3.10/lib/python3.10/site-packages/pyramid/tests/test_scripts/test_pshell.py
slmaankhaan/todo_app
4e5a81a789e02be84525682f3ec5d0bfc3d91e8d
[ "MIT" ]
null
null
null
import unittest from pyramid.tests.test_scripts import dummy class TestPShellCommand(unittest.TestCase): def _getTargetClass(self): from pyramid.scripts.pshell import PShellCommand return PShellCommand def _makeOne(self, patch_bootstrap=True, patch_config=True, patch_args=True, patch_options=True): cmd = self._getTargetClass()([]) if patch_bootstrap: self.bootstrap = dummy.DummyBootstrap() cmd.bootstrap = (self.bootstrap,) if patch_config: self.config_factory = dummy.DummyConfigParserFactory() cmd.ConfigParser = self.config_factory if patch_args: self.args = ('/foo/bar/myapp.ini#myapp',) cmd.args = self.args if patch_options: class Options(object): pass self.options = Options() self.options.python_shell = '' self.options.setup = None cmd.options = self.options return cmd def test_make_default_shell(self): command = self._makeOne() interact = dummy.DummyInteractor() shell = command.make_default_shell(interact) shell({'foo': 'bar'}, 'a help message') self.assertEqual(interact.local, {'foo': 'bar'}) self.assertTrue('a help message' in interact.banner) def test_make_bpython_shell(self): command = self._makeOne() bpython = dummy.DummyBPythonShell() shell = command.make_bpython_shell(bpython) shell({'foo': 'bar'}, 'a help message') self.assertEqual(bpython.locals_, {'foo': 'bar'}) self.assertTrue('a help message' in bpython.banner) def test_make_ipython_v1_1_shell(self): command = self._makeOne() ipshell_factory = dummy.DummyIPShellFactory() shell = command.make_ipython_v1_1_shell(ipshell_factory) shell({'foo': 'bar'}, 'a help message') self.assertEqual(ipshell_factory.kw['user_ns'], {'foo': 'bar'}) self.assertTrue('a help message' in ipshell_factory.kw['banner2']) self.assertTrue(ipshell_factory.shell.called) def test_make_ipython_v0_11_shell(self): command = self._makeOne() ipshell_factory = dummy.DummyIPShellFactory() shell = command.make_ipython_v0_11_shell(ipshell_factory) shell({'foo': 'bar'}, 'a help message') self.assertEqual(ipshell_factory.kw['user_ns'], 
{'foo': 'bar'}) self.assertTrue('a help message' in ipshell_factory.kw['banner2']) self.assertTrue(ipshell_factory.shell.called) def test_make_ipython_v0_10_shell(self): command = self._makeOne() ipshell_factory = dummy.DummyIPShellFactory() shell = command.make_ipython_v0_10_shell(ipshell_factory) shell({'foo': 'bar'}, 'a help message') self.assertEqual(ipshell_factory.kw['argv'], []) self.assertEqual(ipshell_factory.kw['user_ns'], {'foo': 'bar'}) self.assertTrue('a help message' in ipshell_factory.shell.banner) self.assertTrue(ipshell_factory.shell.called) def test_command_loads_default_shell(self): command = self._makeOne() shell = dummy.DummyShell() command.make_ipython_shell = lambda: None command.make_bpython_shell = lambda: None command.make_default_shell = lambda: shell command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_default_shell_with_unknown_shell(self): command = self._makeOne() shell = dummy.DummyShell() bad_shell = dummy.DummyShell() command.make_ipython_shell = lambda: bad_shell command.make_bpython_shell = lambda: bad_shell command.make_default_shell = lambda: shell command.options.python_shell = 'unknow_python_shell' command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, 
}) self.assertEqual(bad_shell.env, {}) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_ipython_v1_1(self): command = self._makeOne() shell = dummy.DummyShell() command.make_ipython_v1_1_shell = lambda: shell command.make_ipython_v0_11_shell = lambda: None command.make_ipython_v0_10_shell = lambda: None command.make_bpython_shell = lambda: None command.make_default_shell = lambda: None command.options.python_shell = 'ipython' command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_ipython_v0_11(self): command = self._makeOne() shell = dummy.DummyShell() command.make_ipython_v1_1_shell = lambda: None command.make_ipython_v0_11_shell = lambda: shell command.make_ipython_v0_10_shell = lambda: None command.make_bpython_shell = lambda: None command.make_default_shell = lambda: None command.options.python_shell = 'ipython' command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_ipython_v0_10(self): command = self._makeOne() shell = dummy.DummyShell() command.make_ipython_v1_1_shell = lambda: None command.make_ipython_v0_11_shell = lambda: None 
command.make_ipython_v0_10_shell = lambda: shell command.make_bpython_shell = lambda: None command.make_default_shell = lambda: None command.options.python_shell = 'ipython' command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_bpython_shell(self): command = self._makeOne() shell = dummy.DummyBPythonShell() command.make_ipython_shell = lambda: None command.make_bpython_shell = lambda: shell command.options.python_shell = 'bpython' command.run() self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.locals_, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.banner) def test_shell_ipython_ordering(self): command = self._makeOne() shell1_1 = dummy.DummyShell() shell0_11 = dummy.DummyShell() shell0_10 = dummy.DummyShell() command.make_ipython_v1_1_shell = lambda: shell1_1 shell = command.make_shell() self.assertEqual(shell, shell1_1) command.make_ipython_v1_1_shell = lambda: None command.make_ipython_v0_11_shell = lambda: shell0_11 shell = command.make_shell() self.assertEqual(shell, shell0_11) command.make_ipython_v0_11_shell = lambda: None command.make_ipython_v0_10_shell = lambda: shell0_10 shell = command.make_shell() self.assertEqual(shell, shell0_10) command.options.python_shell = 
'ipython' command.make_ipython_v1_1_shell = lambda: shell1_1 shell = command.make_shell() self.assertEqual(shell, shell1_1) def test_shell_ordering(self): command = self._makeOne() ipshell = dummy.DummyShell() bpshell = dummy.DummyShell() dshell = dummy.DummyShell() command.make_ipython_shell = lambda: None command.make_bpython_shell = lambda: None command.make_default_shell = lambda: dshell shell = command.make_shell() self.assertEqual(shell, dshell) command.options.python_shell = 'ipython' shell = command.make_shell() self.assertEqual(shell, dshell) command.options.python_shell = 'bpython' shell = command.make_shell() self.assertEqual(shell, dshell) command.make_ipython_shell = lambda: ipshell command.make_bpython_shell = lambda: bpshell command.options.python_shell = 'ipython' shell = command.make_shell() self.assertEqual(shell, ipshell) command.options.python_shell = 'bpython' shell = command.make_shell() self.assertEqual(shell, bpshell) command.options.python_shell = 'python' shell = command.make_shell() self.assertEqual(shell, dshell) def test_command_loads_custom_items(self): command = self._makeOne() model = dummy.Dummy() self.config_factory.items = [('m', model)] shell = dummy.DummyShell() command.run(shell) self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':self.bootstrap.root, 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, 'm':model, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_setup(self): command = self._makeOne() def setup(env): env['a'] = 1 env['root'] = 'root override' self.config_factory.items = [('setup', setup)] shell = dummy.DummyShell() command.run(shell) self.assertTrue(self.config_factory.parser) 
self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':'root override', 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, 'a':1, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_check_variable_override_order(self): command = self._makeOne() model = dummy.Dummy() def setup(env): env['a'] = 1 env['m'] = 'model override' env['root'] = 'root override' self.config_factory.items = [('setup', setup), ('m', model)] shell = dummy.DummyShell() command.run(shell) self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':'root override', 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, 'a':1, 'm':model, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def test_command_loads_setup_from_options(self): command = self._makeOne() def setup(env): env['a'] = 1 env['root'] = 'root override' model = dummy.Dummy() self.config_factory.items = [('setup', 'abc'), ('m', model)] command.options.setup = setup shell = dummy.DummyShell() command.run(shell) self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':self.bootstrap.app, 'root':'root override', 'registry':self.bootstrap.registry, 'request':self.bootstrap.request, 'root_factory':self.bootstrap.root_factory, 'a':1, 'm':model, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) def 
test_command_custom_section_override(self): command = self._makeOne() dummy_ = dummy.Dummy() self.config_factory.items = [('app', dummy_), ('root', dummy_), ('registry', dummy_), ('request', dummy_)] shell = dummy.DummyShell() command.run(shell) self.assertTrue(self.config_factory.parser) self.assertEqual(self.config_factory.parser.filename, '/foo/bar/myapp.ini') self.assertEqual(self.bootstrap.a[0], '/foo/bar/myapp.ini#myapp') self.assertEqual(shell.env, { 'app':dummy_, 'root':dummy_, 'registry':dummy_, 'request':dummy_, 'root_factory':self.bootstrap.root_factory, }) self.assertTrue(self.bootstrap.closer.called) self.assertTrue(shell.help) class Test_main(unittest.TestCase): def _callFUT(self, argv): from pyramid.scripts.pshell import main return main(argv, quiet=True) def test_it(self): result = self._callFUT(['pshell']) self.assertEqual(result, 2)
41.803665
79
0.630785
1,783
15,969
5.455973
0.060572
0.096217
0.050678
0.0331
0.86801
0.83491
0.811986
0.78197
0.729235
0.726357
0
0.009289
0.251738
15,969
381
80
41.913386
0.804837
0
0
0.701149
0
0
0.084544
0.018036
0
0
0
0
0.264368
1
0.071839
false
0.002874
0.011494
0
0.100575
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7cbeec913b636e424574e9fa76ffadcb7faec830
5,964
py
Python
main.py
YUND4/WebScarpingPUC
08de5683cf86a6d073935662e1934c56491d56c9
[ "MIT" ]
null
null
null
main.py
YUND4/WebScarpingPUC
08de5683cf86a6d073935662e1934c56491d56c9
[ "MIT" ]
null
null
null
main.py
YUND4/WebScarpingPUC
08de5683cf86a6d073935662e1934c56491d56c9
[ "MIT" ]
null
null
null
#Esta aplicacion nos permite consumir paginas web #con web scarping #Implementacion por SwankySniperGG #MIT #github.com/ from bs4 import BeautifulSoup import json from scrapscript import simple_get class loadPUC: result = { 'cuentas': [], 'total': 0 } def merge(self, lis): result = '' for l in lis: result = result + l return result def mergeNames(self, lis): result = '' for l in lis: result = result + ' ' + l return result def __init__(self): dictionary = self.result i = 1 while i < 10: url = 'https://puc.com.co/'+str(i) print(url, end = '') if (i == 1): tipo = 'Debito' if (i == 2): tipo = 'Credito' if (i == 3): tipo = 'Credito' if (i == 4): tipo = 'Credito' if (i == 5): tipo = 'Debito' if (i == 6): tipo = 'Debito' if (i == 7): tipo = 'Debito' if (i == 8): tipo = 'Debito' if (i == 9): tipo = 'Credito' raw_html = simple_get(url) html = BeautifulSoup(raw_html, 'html.parser') if raw_html is not None: print(' Status - OK') x = html.h1.string x = x.split(' ') try: y = html.find('div', class_='col-md-7').p.string except: y = None dictionary['total'] = dictionary['total'] + 1 dictionary['cuentas'].append({ 'nombre': self.mergeNames(x[1:]), 'codigo': x[0], 'descripcion':y, 'tipo':tipo, 'hijos': [] }) lis = html.find_all('span', class_='code') for l in lis[1:]: url = 'https://puc.com.co/'+l.string print(url, end = '') raw_html = simple_get(url) html = BeautifulSoup(raw_html, 'html.parser') if raw_html is not None: print(' Status - OK') x = html.h1.string x = x.split(' ') try: y = html.find('div', class_='col-md-7').p.string except: y = None dictionary['total'] = dictionary['total'] + 1 dictionary['cuentas'][-1]['hijos'].append({ 'nombre': self.mergeNames(x[1:]), 'codigo': self.merge(x[0][-2:]), 'descripcion':y, 'tipo':tipo, 'hijos': [] }) lis = html.find_all('span', class_='code') for l in lis[2:]: url = 'https://puc.com.co/'+l.string print(url, end = '') raw_html = simple_get(url) html = BeautifulSoup(raw_html, 'html.parser') if raw_html is not None: print(' Status - OK') 
x = html.h1.string x = x.split(' ') try: y = html.find('div', class_='col-md-7').p.string except: y = None dictionary['total'] = dictionary['total'] + 1 dictionary['cuentas'][-1]['hijos'][-1]['hijos'].append({ 'nombre': self.mergeNames(x[1:]), 'codigo': self.merge(x[0][-2:]), 'descripcion':y, 'tipo':tipo, 'hijos': [] }) lis = html.find_all('span', class_='code') for l in lis[3:]: url = 'https://puc.com.co/'+l.string print(url, end = '') raw_html = simple_get(url) html = BeautifulSoup(raw_html, 'html.parser') if raw_html is not None: print(' Status - OK') x = html.h1.string x = x.split(' ') dictionary['total'] = dictionary['total'] + 1 dictionary['cuentas'][-1]['hijos'][-1]['hijos'][-1]['hijos'].append({ 'nombre': self.mergeNames(x[1:]), 'codigo': self.merge(x[0][-2:]), 'descripcion': None, 'tipo':tipo, 'hijos': [] }) else: print(' Status - Not found') else: print(' Status - Not found') else: print(' Status - Not found') else: print(' Status - Not found') i = i + 1 with open('result.json', 'w') as fp: json.dump(dictionary, fp) if __name__ == '__main__': loadPUC()
40.571429
109
0.330651
486
5,964
3.979424
0.207819
0.043433
0.015512
0.023268
0.750776
0.742503
0.742503
0.724922
0.724922
0.724922
0
0.017306
0.554326
5,964
147
110
40.571429
0.710309
0.018612
0
0.703704
0
0
0.106343
0
0
0
0
0
0
1
0.022222
false
0
0.022222
0
0.074074
0.088889
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7cc8e277f74b0866af0c18e9d2beb0b098ed68e0
14,455
py
Python
tests/hdx/utilities/test_path.py
OCHA-DAP/hdx-python-utilities
3ff2720bddf7b4ee107adbe3a2222c9e8fbd487f
[ "MIT" ]
4
2019-01-04T10:44:18.000Z
2020-01-23T14:06:38.000Z
tests/hdx/utilities/test_path.py
OCHA-DAP/hdx-python-utilities
3ff2720bddf7b4ee107adbe3a2222c9e8fbd487f
[ "MIT" ]
3
2017-11-01T08:57:02.000Z
2021-10-17T20:51:15.000Z
tests/hdx/utilities/test_path.py
OCHA-DAP/hdx-python-utilities
3ff2720bddf7b4ee107adbe3a2222c9e8fbd487f
[ "MIT" ]
1
2018-09-12T18:02:22.000Z
2018-09-12T18:02:22.000Z
"""Path Utility Tests""" import copy from os.path import exists, join from shutil import rmtree from tempfile import gettempdir import pytest from hdx.utilities.loader import load_file_to_str from hdx.utilities.path import ( get_filename_extension_from_url, get_filename_from_url, get_temp_dir, multiple_progress_storing_tempdir, progress_storing_tempdir, temp_dir, ) class TestPath: @pytest.fixture(scope="class") def mytestdir(self): return join("haha", "lala") @pytest.fixture(scope="class") def fixtureurl(self): return "https://raw.githubusercontent.com/OCHA-DAP/hdx-python-utilities/master/tests/fixtures/test_data.csv" def test_get_temp_dir(self, monkeypatch, mytestdir): assert get_temp_dir() == gettempdir() assert get_temp_dir("TEST") == join(gettempdir(), "TEST") monkeypatch.setenv("TEMP_DIR", mytestdir) assert get_temp_dir() == mytestdir monkeypatch.delenv("TEMP_DIR") def test_temp_dir(self, monkeypatch, mytestdir): monkeypatch.setenv("TEMP_DIR", mytestdir) with temp_dir() as tempdir: assert tempdir == mytestdir monkeypatch.delenv("TEMP_DIR") tempfolder = "papa" expected_dir = join(gettempdir(), tempfolder) with temp_dir(tempfolder) as tempdir: assert tempdir == expected_dir assert exists(tempdir) is False try: with temp_dir(tempfolder) as tempdir: assert tempdir == expected_dir raise ValueError("Fail!") except ValueError: pass assert exists(tempdir) is False with temp_dir( tempfolder, delete_on_success=True, delete_on_failure=True ) as tempdir: assert tempdir == expected_dir assert exists(tempdir) is False try: with temp_dir( tempfolder, delete_on_success=True, delete_on_failure=True ) as tempdir: assert tempdir == expected_dir raise ValueError("Fail!") except ValueError: pass assert exists(tempdir) is False with temp_dir( tempfolder, delete_on_success=False, delete_on_failure=False ) as tempdir: assert tempdir == expected_dir assert exists(tempdir) is True rmtree(tempdir) try: with temp_dir( tempfolder, delete_on_success=False, delete_on_failure=False ) as 
tempdir: assert tempdir == expected_dir raise ValueError("Fail!") except ValueError: pass assert exists(tempdir) is True with temp_dir( tempfolder, delete_on_success=True, delete_on_failure=False ) as tempdir: assert tempdir == expected_dir assert exists(tempdir) is False try: with temp_dir( tempfolder, delete_on_success=True, delete_on_failure=False ) as tempdir: assert tempdir == expected_dir raise ValueError("Fail!") except ValueError: pass assert exists(tempdir) is True rmtree(tempdir) with temp_dir( tempfolder, delete_on_success=False, delete_on_failure=True ) as tempdir: assert tempdir == expected_dir assert exists(tempdir) is True rmtree(tempdir) try: with temp_dir( tempfolder, delete_on_success=False, delete_on_failure=True ) as tempdir: assert tempdir == expected_dir raise ValueError("Fail!") except ValueError: pass assert exists(tempdir) is False def test_progress_storing_tempdir(self, monkeypatch): tempfolder = "papa" expected_dir = join(gettempdir(), tempfolder) rmtree(expected_dir, ignore_errors=True) iterator = [ {"iso3": "AFG", "name": "Afghanistan"}, {"iso3": "SDN", "name": "Sudan"}, {"iso3": "YEM", "name": "Yemen"}, {"iso3": "ZAM", "name": "Zambia"}, ] expected_batch_file = join(expected_dir, "batch.txt") result = list() for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): assert info["folder"] == expected_dir expected_batch = load_file_to_str(expected_batch_file, strip=True) result.append(nextdict) assert result == iterator assert expected_batch == info["batch"] assert exists(expected_dir) is False monkeypatch.setenv("WHERETOSTART", "iso3=SDN") result = list() for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): assert exists(info["folder"]) is True assert info["folder"] == expected_dir expected_batch = load_file_to_str(expected_batch_file, strip=True) result.append(nextdict) assert result == iterator[1:] assert expected_batch == info["batch"] assert exists(expected_dir) is False 
monkeypatch.delenv("WHERETOSTART") try: for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): if nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass assert exists(expected_dir) is True result = list() for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): assert exists(info["folder"]) is True assert info["folder"] == expected_dir assert info["batch"] == start_batch result.append(nextdict) assert result == iterator[2:] assert exists(expected_dir) is False try: for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): if nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass assert exists(expected_dir) is True monkeypatch.setenv("WHERETOSTART", "RESET") result = list() for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): assert exists(info["folder"]) is True assert info["folder"] == expected_dir assert info["batch"] != start_batch result.append(nextdict) assert result == iterator assert exists(expected_dir) is False monkeypatch.delenv("WHERETOSTART") try: for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): if nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass assert exists(expected_dir) is True monkeypatch.setenv("WHERETOSTART", "iso3=SDN") result = list() for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): assert exists(info["folder"]) is True assert info["folder"] == expected_dir assert info["batch"] == start_batch result.append(nextdict) assert result == iterator[1:] assert exists(expected_dir) is False monkeypatch.delenv("WHERETOSTART") try: for info, nextdict in progress_storing_tempdir( tempfolder, iterator, "iso3" ): if nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass 
monkeypatch.setenv("WHERETOSTART", "iso3=NOTFOUND") found = False for _ in progress_storing_tempdir(tempfolder, iterator, "iso3"): found = True assert found is False assert exists(expected_dir) is True batch = load_file_to_str(expected_batch_file, strip=True) assert batch == start_batch monkeypatch.delenv("WHERETOSTART") monkeypatch.setenv("WHERETOSTART", "NOTFOUND=SDN") found = False for _ in progress_storing_tempdir(tempfolder, iterator, "iso3"): found = True assert found is False assert exists(expected_dir) is True batch = load_file_to_str(expected_batch_file, strip=True) assert batch == start_batch monkeypatch.delenv("WHERETOSTART") rmtree(expected_dir, ignore_errors=True) def test_multiple_progress_storing_tempdir(self, monkeypatch): tempfolder = "gaga" expected_dir = join(gettempdir(), tempfolder) rmtree(expected_dir, ignore_errors=True) iterator1 = [{"emergency_id": "911"}] iterator2 = [ {"iso3": "AFG", "name": "Afghanistan"}, {"iso3": "SDN", "name": "Sudan"}, {"iso3": "YEM", "name": "Yemen"}, {"iso3": "ZAM", "name": "Zambia"}, ] iterators = [iterator1, iterator2] keys = ["emergency_id", "iso3"] results = list() for result in multiple_progress_storing_tempdir( tempfolder, iterators, keys, "1234" ): results.append(copy.deepcopy(result)) expected_results = [ ( 0, { "folder": "/tmp/gaga/0", "batch": "1234", "progress": "emergency_id=911", }, {"emergency_id": "911"}, ), ( 1, { "folder": "/tmp/gaga/1", "batch": "1234", "progress": "iso3=AFG", }, {"iso3": "AFG", "name": "Afghanistan"}, ), ( 1, { "folder": "/tmp/gaga/1", "batch": "1234", "progress": "iso3=SDN", }, {"iso3": "SDN", "name": "Sudan"}, ), ( 1, { "folder": "/tmp/gaga/1", "batch": "1234", "progress": "iso3=YEM", }, {"iso3": "YEM", "name": "Yemen"}, ), ( 1, { "folder": "/tmp/gaga/1", "batch": "1234", "progress": "iso3=ZAM", }, {"iso3": "ZAM", "name": "Zambia"}, ), ] assert results == expected_results assert exists(expected_dir) is False results = list() try: for result in multiple_progress_storing_tempdir( 
tempfolder, iterators, keys ): results.append(copy.deepcopy(result)) i, info, nextdict = result if "iso3" in nextdict and nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass for result in expected_results: result[1]["batch"] = start_batch assert results == expected_results[:4] assert exists(expected_dir) is True result = list() for _, info, nextdict in multiple_progress_storing_tempdir( tempfolder, iterators, keys ): assert exists(info["folder"]) is True assert info["folder"] == join(expected_dir, "1") assert info["batch"] == start_batch result.append(nextdict) assert result == iterator2[2:] assert exists(expected_dir) is False try: for _, info, nextdict in multiple_progress_storing_tempdir( tempfolder, iterators, keys ): if "iso3" in nextdict and nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass for result in expected_results: result[1]["batch"] = start_batch assert exists(expected_dir) is True monkeypatch.setenv("WHERETOSTART", "RESET") results = list() for result in multiple_progress_storing_tempdir( tempfolder, iterators, keys, "1234" ): results.append(copy.deepcopy(result)) for result in expected_results: result[1]["batch"] = "1234" assert results == expected_results assert exists(expected_dir) is False monkeypatch.delenv("WHERETOSTART") try: for _, info, nextdict in multiple_progress_storing_tempdir( tempfolder, iterators, keys ): if "iso3" in nextdict and nextdict["iso3"] == "YEM": start_batch = info["batch"] raise ValueError("Problem!") except ValueError: pass for result in expected_results: result[1]["batch"] = start_batch assert exists(expected_dir) is True monkeypatch.setenv("WHERETOSTART", "iso3=SDN") result = list() for _, info, nextdict in multiple_progress_storing_tempdir( tempfolder, iterators, keys ): assert exists(info["folder"]) is True assert info["folder"] == join(expected_dir, "1") assert info["batch"] == start_batch 
result.append(nextdict) assert result == iterator2[1:] assert exists(expected_dir) is False monkeypatch.delenv("WHERETOSTART") rmtree(expected_dir, ignore_errors=True) def test_get_filename_extension_from_url(self, fixtureurl): filename = get_filename_from_url(fixtureurl) assert filename == "test_data.csv" filename, extension = get_filename_extension_from_url(fixtureurl) assert filename == "test_data" assert extension == ".csv"
35.868486
116
0.547008
1,395
14,455
5.490323
0.089606
0.060321
0.063194
0.075206
0.865779
0.815772
0.804805
0.784698
0.779606
0.760804
0
0.011897
0.354549
14,455
402
117
35.957711
0.809003
0.001245
0
0.795213
0
0.00266
0.088421
0
0
0
0
0
0.207447
1
0.018617
false
0.031915
0.018617
0.005319
0.045213
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
6b11e74e28410d1cd32e2afba7a83bd84cd5e120
50,879
py
Python
tests/src/OneLogin/saml2_tests/idp_metadata_parser_test.py
tuvshuud/python-saml
3bbc0a99659a7d71b70784a479c2aed3d14001f5
[ "MIT" ]
2
2018-12-05T12:45:59.000Z
2019-06-27T12:01:47.000Z
tests/src/OneLogin/saml2_tests/idp_metadata_parser_test.py
sighttviewliu/python-saml
3814b0fe98d6ab78cf92b39c15e1785b1cab22bb
[ "MIT" ]
null
null
null
tests/src/OneLogin/saml2_tests/idp_metadata_parser_test.py
sighttviewliu/python-saml
3814b0fe98d6ab78cf92b39c15e1785b1cab22bb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2010-2018 OneLogin, Inc. # MIT License from copy import deepcopy import json from os.path import dirname, join, exists from lxml.etree import XMLSyntaxError import unittest from urllib2 import URLError from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser from onelogin.saml2.constants import OneLogin_Saml2_Constants class OneLogin_Saml2_IdPMetadataParser_Test(unittest.TestCase): # Instruct unittest to not hide diffs upon test failure, even for complex # dictionaries. This prevents the message "Diff is 907 characters long. # Set self.maxDiff to None to see it." from showing up. maxDiff = None data_path = join(dirname(dirname(dirname(dirname(__file__)))), 'data') settings_path = join(dirname(dirname(dirname(dirname(__file__)))), 'settings') def loadSettingsJSON(self, filename='settings1.json'): filename = join(self.settings_path, filename) if exists(filename): stream = open(filename, 'r') settings = json.load(stream) stream.close() return settings else: raise Exception('Settings json file does not exist') def file_contents(self, filename): f = open(filename, 'r') content = f.read() f.close() return content def testGetMetadata(self): """ Tests the get_metadata method of the OneLogin_Saml2_IdPMetadataParser """ with self.assertRaises(Exception): data = OneLogin_Saml2_IdPMetadataParser.get_metadata('http://google.es') try: data = OneLogin_Saml2_IdPMetadataParser.get_metadata('https://www.testshib.org/metadata/testshib-providers.xml') except URLError: data = self.file_contents(join(self.data_path, 'metadata', 'testshib-providers.xml')) self.assertTrue(data is not None and data is not {}) def testParseRemote(self): """ Tests the parse_remote method of the OneLogin_Saml2_IdPMetadataParser """ with self.assertRaises(Exception): data = OneLogin_Saml2_IdPMetadataParser.parse_remote('http://google.es') try: data = 
OneLogin_Saml2_IdPMetadataParser.parse_remote('https://www.testshib.org/metadata/testshib-providers.xml') except URLError: xml = self.file_contents(join(self.data_path, 'metadata', 'testshib-providers.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml) self.assertTrue(data is not None and data is not {}) expected_settings_json = """ { "sp": { "NameIDFormat": "urn:mace:shibboleth:1.0:nameIdentifier" }, "idp": { "x509cert": "MIIDAzCCAeugAwIBAgIVAPX0G6LuoXnKS0Muei006mVSBXbvMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNVBAMMEGlkcC50ZXN0c2hpYi5vcmcwHhcNMTYwODIzMjEyMDU0WhcNMzYwODIzMjEyMDU0WjAbMRkwFwYDVQQDDBBpZHAudGVzdHNoaWIub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAg9C4J2DiRTEhJAWzPt1S3ryhm3M2P3hPpwJwvt2q948vdTUxhhvNMuc3M3S4WNh6JYBs53R+YmjqJAII4ShMGNEmlGnSVfHorex7IxikpuDPKV3SNf28mCAZbQrX+hWA+ann/uifVzqXktOjs6DdzdBnxoVhniXgC8WCJwKcx6JO/hHsH1rG/0DSDeZFpTTcZHj4S9MlLNUtt5JxRzV/MmmB3ObaX0CMqsSWUOQeE4nylSlp5RWHCnx70cs9kwz5WrflnbnzCeHU2sdbNotBEeTHot6a2cj/pXlRJIgPsrL/4VSicPZcGYMJMPoLTJ8mdy6mpR6nbCmP7dVbCIm/DQIDAQABoz4wPDAdBgNVHQ4EFgQUUfaDa2mPi24x09yWp1OFXmZ2GPswGwYDVR0RBBQwEoIQaWRwLnRlc3RzaGliLm9yZzANBgkqhkiG9w0BAQsFAAOCAQEASKKgqTxhqBzROZ1eVy++si+eTTUQZU4+8UywSKLia2RattaAPMAcXUjO+3cYOQXLVASdlJtt+8QPdRkfp8SiJemHPXC8BES83pogJPYEGJsKo19l4XFJHPnPy+Dsn3mlJyOfAa8RyWBS80u5lrvAcr2TJXt9fXgkYs7BOCigxtZoR8flceGRlAZ4p5FPPxQR6NDYb645jtOTMVr3zgfjP6Wh2dt+2p04LG7ENJn8/gEwtXVuXCsPoSCDx9Y0QmyXTJNdV1aB0AhORkWPlFYwp+zOyOIR+3m1+pqWFpn0eT/HrxpdKa74FA3R2kq4R7dXe4G0kUgXTdqXMLRKhDgdmA==", "entityId": "https://idp.testshib.org/idp/shibboleth", "singleSignOnService": { "url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data) def testParse(self): """ Tests the parse method of the OneLogin_Saml2_IdPMetadataParser """ with self.assertRaises(XMLSyntaxError): data = OneLogin_Saml2_IdPMetadataParser.parse('') xml_sp_metadata = 
self.file_contents(join(self.data_path, 'metadata', 'metadata_settings1.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml_sp_metadata) self.assertEqual({}, data) xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) # W/o further specification, expect to get the redirect binding SSO # URL extracted. expected_settings_json = """ { "idp": { "singleSignOnService": { "url": "https://app.onelogin.com/trust/saml2/http-post/sso/383123", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "x509cert": "MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbTAeFw0xMzA2MDUxNzE2MjBaFw0xODA2MDUxNzE2MjBaMGcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQHDAxTYW50YSBNb25pY2ExETAPBgNVBAoMCE9uZUxvZ2luMRkwFwYDVQQDDBBhcHAub25lbG9naW4uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAse8rnep4qL2GmhH10pMQyJ2Jae+AQHyfgVjaQZ7Z0QQog5jX91vcJRSMi0XWJnUtOr6lF0dq1+yckjZ92wyLrH+7fvngNO1aV4Mjk9sTgf+iqMrae6y6fRxDt9PXrEFVjvd3vv7QTJf2FuIPy4vVP06Dt8EMkQIr8rmLmU0mTr1k2DkrdtdlCuNFTXuAu3QqfvNCRrRwfNObn9MP6JeOUdcGLJsBjGF8exfcN1SFzRF0JFr3dmOlx761zK5liD0T1sYWnDquatj/JD9fZMbKecBKni1NglH/LVd+b6aJUAr5LulERULUjLqYJRKW31u91/4Qazdo9tbvwqyFxaoUrwIDAQABo4HUMIHRMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFPWcXvQSlTXnzZD2xziuoUvrrDedMIGRBgNVHSMEgYkwgYaAFPWcXvQSlTXnzZD2xziuoUvrrDedoWukaTBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbYIBATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEFBQADggEBAB/8xe3rzqXQVxzHyAHuAuPa73ClDoL1cko0Fp8CGcqEIyj6Te9gx5z6wyfv+Lo8RFvBLlnB1lXqbC+fTGcVgG/4oKLJ5UwRFxInqpZPnOAudVNnd0PYOODn9FWs6u+OTIQIaIcPUv3MhB9lwHIJsTk/bs9xcru5TPyLIxLLd6ib/pRceKH2mTkzUd0DYk9CQNXXeoGx/du5B9nh3ClPTbVakRzl3oswgI5MQIphYxkW70SopEh4kOFSRE1ND31NNIq1YrXlgtkguQBFsZWuQOPR6cEwFZzP0tHTYbI839WgxX6hfhIUT
Uz6mLqq4+3P4BG3+1OXeVDg63y8Uh781sE=", "entityId": "https://app.onelogin.com/saml/metadata/383123" }, "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data) def test_parse_testshib_required_binding_sso_redirect(self): """ Test with testshib metadata. Especially test extracting SSO with REDIRECT binding. Note that the testshib metadata does not contain an SLO specification in the first <IDPSSODescriptor> tag. """ expected_settings_json = """ { "sp": { "NameIDFormat": "urn:mace:shibboleth:1.0:nameIdentifier" }, "idp": { "entityId": "https://idp.testshib.org/idp/shibboleth", "singleSignOnService": { "url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "x509cert": "MIIDAzCCAeugAwIBAgIVAPX0G6LuoXnKS0Muei006mVSBXbvMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNVBAMMEGlkcC50ZXN0c2hpYi5vcmcwHhcNMTYwODIzMjEyMDU0WhcNMzYwODIzMjEyMDU0WjAbMRkwFwYDVQQDDBBpZHAudGVzdHNoaWIub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAg9C4J2DiRTEhJAWzPt1S3ryhm3M2P3hPpwJwvt2q948vdTUxhhvNMuc3M3S4WNh6JYBs53R+YmjqJAII4ShMGNEmlGnSVfHorex7IxikpuDPKV3SNf28mCAZbQrX+hWA+ann/uifVzqXktOjs6DdzdBnxoVhniXgC8WCJwKcx6JO/hHsH1rG/0DSDeZFpTTcZHj4S9MlLNUtt5JxRzV/MmmB3ObaX0CMqsSWUOQeE4nylSlp5RWHCnx70cs9kwz5WrflnbnzCeHU2sdbNotBEeTHot6a2cj/pXlRJIgPsrL/4VSicPZcGYMJMPoLTJ8mdy6mpR6nbCmP7dVbCIm/DQIDAQABoz4wPDAdBgNVHQ4EFgQUUfaDa2mPi24x09yWp1OFXmZ2GPswGwYDVR0RBBQwEoIQaWRwLnRlc3RzaGliLm9yZzANBgkqhkiG9w0BAQsFAAOCAQEASKKgqTxhqBzROZ1eVy++si+eTTUQZU4+8UywSKLia2RattaAPMAcXUjO+3cYOQXLVASdlJtt+8QPdRkfp8SiJemHPXC8BES83pogJPYEGJsKo19l4XFJHPnPy+Dsn3mlJyOfAa8RyWBS80u5lrvAcr2TJXt9fXgkYs7BOCigxtZoR8flceGRlAZ4p5FPPxQR6NDYb645jtOTMVr3zgfjP6Wh2dt+2p04LG7ENJn8/gEwtXVuXCsPoSCDx9Y0QmyXTJNdV1aB0AhORkWPlFYwp+zOyOIR+3m1+pqWFpn0eT/HrxpdKa74FA3R2kq4R7dXe4G0kUgXTdqXMLRKhDgdmA==" } } """ try: xmldoc = OneLogin_Saml2_IdPMetadataParser.get_metadata( 
'https://www.testshib.org/metadata/testshib-providers.xml') except Exception: xmldoc = self.file_contents(join(self.data_path, 'metadata', 'testshib-providers.xml')) # Parse, require SSO REDIRECT binding, implicitly. settings1 = OneLogin_Saml2_IdPMetadataParser.parse(xmldoc) # Parse, require SSO REDIRECT binding, explicitly. settings2 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT ) expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, settings1) self.assertEqual(expected_settings, settings2) def test_parse_testshib_required_binding_sso_post(self): """ Test with testshib metadata. Especially test extracting SSO with POST binding. """ expected_settings_json = """ { "sp": { "NameIDFormat": "urn:mace:shibboleth:1.0:nameIdentifier" }, "idp": { "x509cert": "MIIDAzCCAeugAwIBAgIVAPX0G6LuoXnKS0Muei006mVSBXbvMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNVBAMMEGlkcC50ZXN0c2hpYi5vcmcwHhcNMTYwODIzMjEyMDU0WhcNMzYwODIzMjEyMDU0WjAbMRkwFwYDVQQDDBBpZHAudGVzdHNoaWIub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAg9C4J2DiRTEhJAWzPt1S3ryhm3M2P3hPpwJwvt2q948vdTUxhhvNMuc3M3S4WNh6JYBs53R+YmjqJAII4ShMGNEmlGnSVfHorex7IxikpuDPKV3SNf28mCAZbQrX+hWA+ann/uifVzqXktOjs6DdzdBnxoVhniXgC8WCJwKcx6JO/hHsH1rG/0DSDeZFpTTcZHj4S9MlLNUtt5JxRzV/MmmB3ObaX0CMqsSWUOQeE4nylSlp5RWHCnx70cs9kwz5WrflnbnzCeHU2sdbNotBEeTHot6a2cj/pXlRJIgPsrL/4VSicPZcGYMJMPoLTJ8mdy6mpR6nbCmP7dVbCIm/DQIDAQABoz4wPDAdBgNVHQ4EFgQUUfaDa2mPi24x09yWp1OFXmZ2GPswGwYDVR0RBBQwEoIQaWRwLnRlc3RzaGliLm9yZzANBgkqhkiG9w0BAQsFAAOCAQEASKKgqTxhqBzROZ1eVy++si+eTTUQZU4+8UywSKLia2RattaAPMAcXUjO+3cYOQXLVASdlJtt+8QPdRkfp8SiJemHPXC8BES83pogJPYEGJsKo19l4XFJHPnPy+Dsn3mlJyOfAa8RyWBS80u5lrvAcr2TJXt9fXgkYs7BOCigxtZoR8flceGRlAZ4p5FPPxQR6NDYb645jtOTMVr3zgfjP6Wh2dt+2p04LG7ENJn8/gEwtXVuXCsPoSCDx9Y0QmyXTJNdV1aB0AhORkWPlFYwp+zOyOIR+3m1+pqWFpn0eT/HrxpdKa74FA3R2kq4R7dXe4G0kUgXTdqXMLRKhDgdmA==", "entityId": "https://idp.testshib.org/idp/shibboleth", "singleSignOnService": { "url": 
"https://idp.testshib.org/idp/profile/SAML2/POST/SSO", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" } } } """ try: xmldoc = OneLogin_Saml2_IdPMetadataParser.get_metadata( 'https://www.testshib.org/metadata/testshib-providers.xml') except URLError: xmldoc = self.file_contents(join(self.data_path, 'metadata', 'testshib-providers.xml')) # Parse, require POST binding. settings = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST ) expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, settings) def test_parse_required_binding_all(self): """ Test all combinations of the `require_slo_binding` and `require_sso_binding` parameters. Note: IdP metadata contains a single logout (SLO) service and does not specify any endpoint for the POST binding. """ expected_settings_json = """ { "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" }, "idp": { "entityId": "urn:example:idp", "x509cert": 
"MIIDPDCCAiQCCQDydJgOlszqbzANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEQMA4GA1UEChMHSmFua3lDbzESMBAGA1UEAxMJbG9jYWxob3N0MB4XDTE0MDMxMjE5NDYzM1oXDTI3MTExOTE5NDYzM1owYDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEDAOBgNVBAoTB0phbmt5Q28xEjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMGvJpRTTasRUSPqcbqCG+ZnTAurnu0vVpIG9lzExnh11o/BGmzu7lB+yLHcEdwrKBBmpepDBPCYxpVajvuEhZdKFx/Fdy6j5mH3rrW0Bh/zd36CoUNjbbhHyTjeM7FN2yF3u9lcyubuvOzr3B3gX66IwJlU46+wzcQVhSOlMk2tXR+fIKQExFrOuK9tbX3JIBUqItpI+HnAow509CnM134svw8PTFLkR6/CcMqnDfDK1m993PyoC1Y+N4X9XkhSmEQoAlAHPI5LHrvuujM13nvtoVYvKYoj7ScgumkpWNEvX652LfXOnKYlkB8ZybuxmFfIkzedQrbJsyOhfL03cMECAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAeHwzqwnzGEkxjzSD47imXaTqtYyETZow7XwBc0ZaFS50qRFJUgKTAmKS1xQBP/qHpStsROT35DUxJAE6NY1Kbq3ZbCuhGoSlY0L7VzVT5tpu4EY8+Dq/u2EjRmmhoL7UkskvIZ2n1DdERtd+YUMTeqYl9co43csZwDno/IKomeN5qaPc39IZjikJ+nUC6kPFKeu/3j9rgHNlRtocI6S1FdtFz9OZMQlpr0JbUt2T3xS/YoQJn6coDmJL5GTiiKM6cOe+Ur1VwzS1JEDbSS2TWWhzq8ojLdrotYLGd9JOsoQhElmz+tMfCFQUFLExinPAyy7YHlSiVX13QH2XTu/iQQ==", "singleSignOnService": { "url": "http://idp.example.com", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "singleLogoutService": { "url": "http://idp.example.com/logout", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ xmldoc = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata2.xml')) expected_settings = json.loads(expected_settings_json) # Parse, require SLO and SSO REDIRECT binding, implicitly. settings1 = OneLogin_Saml2_IdPMetadataParser.parse(xmldoc) # Parse, require SLO and SSO REDIRECT binding, explicitly. 
settings2 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT, required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT ) expected_settings1_2 = deepcopy(expected_settings) self.assertEqual(expected_settings1_2, settings1) self.assertEqual(expected_settings1_2, settings2) settings3 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST, required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST ) expected_settings3 = deepcopy(expected_settings) del expected_settings3['idp']['singleLogoutService'] del expected_settings3['idp']['singleSignOnService'] self.assertEqual(expected_settings3, settings3) settings4 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST, required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT ) settings5 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST ) expected_settings4_5 = deepcopy(expected_settings) del expected_settings4_5['idp']['singleSignOnService'] self.assertEqual(expected_settings4_5, settings4) self.assertEqual(expected_settings4_5, settings5) settings6 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT, required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST ) settings7 = OneLogin_Saml2_IdPMetadataParser.parse( xmldoc, required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST ) expected_settings6_7 = deepcopy(expected_settings) del expected_settings6_7['idp']['singleLogoutService'] self.assertEqual(expected_settings6_7, settings6) self.assertEqual(expected_settings6_7, settings7) def test_parse_with_entity_id(self): """ Tests the parse method of the OneLogin_Saml2_IdPMetadataParser Case: Provide entity_id to identify the desired IdPDescriptor from EntitiesDescriptor """ 
xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_multiple_descriptors.xml')) # should find first descriptor data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) self.assertEqual("https://foo.example.com/access/saml/idp.xml", data["idp"]["entityId"]) # should find desired descriptor data2 = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata, entity_id="https://bar.example.com/access/saml/idp.xml") self.assertEqual("https://bar.example.com/access/saml/idp.xml", data2["idp"]["entityId"]) expected_settings_json = """ { "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" }, "idp": { "singleLogoutService": { "url": "https://hello.example.com/access/saml/logout", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "entityId": "https://bar.example.com/access/saml/idp.xml", "x509cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURxekNDQXhTZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBRENCaGpFTE1Ba0dBMVVFQmhNQ1FWVXgKRERBS0JnTlZCQWdUQTA1VFZ6RVBNQTBHQTFVRUJ4TUdVM2xrYm1WNU1Rd3dDZ1lEVlFRS0RBTlFTVlF4Q1RBSApCZ05WQkFzTUFERVlNQllHQTFVRUF3d1BiR0YzY21WdVkyVndhWFF1WTI5dE1TVXdJd1lKS29aSWh2Y05BUWtCCkRCWnNZWGR5Wlc1alpTNXdhWFJBWjIxaGFXd3VZMjl0TUI0WERURXlNRFF4T1RJeU5UUXhPRm9YRFRNeU1EUXgKTkRJeU5UUXhPRm93Z1lZeEN6QUpCZ05WQkFZVEFrRlZNUXd3Q2dZRFZRUUlFd05PVTFjeER6QU5CZ05WQkFjVApCbE41Wkc1bGVURU1NQW9HQTFVRUNnd0RVRWxVTVFrd0J3WURWUVFMREFBeEdEQVdCZ05WQkFNTUQyeGhkM0psCmJtTmxjR2wwTG1OdmJURWxNQ01HQ1NxR1NJYjNEUUVKQVF3V2JHRjNjbVZ1WTJVdWNHbDBRR2R0WVdsc0xtTnYKYlRDQm56QU5CZ2txaGtpRzl3MEJBUUVGQUFPQmpRQXdnWWtDZ1lFQXFqaWUzUjJvaStwRGFldndJeXMvbWJVVApubkdsa3h0ZGlrcnExMXZleHd4SmlQTmhtaHFSVzNtVXVKRXpsbElkVkw2RW14R1lUcXBxZjkzSGxoa3NhZUowCjhVZ2pQOVVtTVlyaFZKdTFqY0ZXVjdmei9yKzIxL2F3VG5EVjlzTVlRcXVJUllZeTdiRzByMU9iaXdkb3ZudGsKN2dGSTA2WjB2WmFjREU1Ym9xVUNBd0VBQWFPQ0FTVXdnZ0VoTUFrR0ExVWRFd1FDTUFBd0N3WURWUjBQQkFRRApBZ1VnTUIwR0ExVWREZ1FXQkJTUk9OOEdKOG8rOGpnRnRqa3R3WmRxeDZCUnlUQVRCZ05WSFNVRUREQUtCZ2dyCkJnRUZCUWNEQVRBZEJnbGdoa2dCaHZoQ0FRMEVFQllPVkdWe
mRDQllOVEE1SUdObGNuUXdnYk1HQTFVZEl3U0IKcXpDQnFJQVVrVGpmQmlmS1B2STRCYlk1TGNHWGFzZWdVY21oZ1l5a2dZa3dnWVl4Q3pBSkJnTlZCQVlUQWtGVgpNUXd3Q2dZRFZRUUlFd05PVTFjeER6QU5CZ05WQkFjVEJsTjVaRzVsZVRFTU1Bb0dBMVVFQ2d3RFVFbFVNUWt3CkJ3WURWUVFMREFBeEdEQVdCZ05WQkFNTUQyeGhkM0psYm1ObGNHbDBMbU52YlRFbE1DTUdDU3FHU0liM0RRRUoKQVF3V2JHRjNjbVZ1WTJVdWNHbDBRR2R0WVdsc0xtTnZiWUlCQVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9CZ1FDRQpUQWVKVERTQVc2ejFVRlRWN1FyZWg0VUxGT1JhajkrZUN1RjNLV0RIYyswSVFDajlyZG5ERzRRL3dmNy9yYVEwCkpuUFFDU0NkclBMSmV5b1BIN1FhVHdvYUY3ZHpWdzRMQ3N5TkpURld4NGNNNTBWdzZSNWZET2dpQzhic2ZmUzgKQkptb3VscnJaRE5OVmpHOG1XNmNMeHJZdlZRT3JSVmVjQ0ZJZ3NzQ2JBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", "singleSignOnService": { "url": "https://hello.example.com/access/saml/login", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data2) def test_parse_multi_certs(self): """ Tests the parse method of the OneLogin_Saml2_IdPMetadataParser Case: IdP metadata contains multiple certs """ xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata_multi_certs.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) expected_settings_json = """ { "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" }, "idp": { "singleLogoutService": { "url": "https://idp.examle.com/saml/slo", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "x509certMulti": { "encryption": [ 
"MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==" ], "signing": [ 
"MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==", 
"MIICZDCCAc2gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBPMQswCQYDVQQGEwJ1czEUMBIGA1UECAwLZXhhbXBsZS5jb20xFDASBgNVBAoMC2V4YW1wbGUuY29tMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xNzA0MTUxNjMzMThaFw0xODA0MTUxNjMzMThaME8xCzAJBgNVBAYTAnVzMRQwEgYDVQQIDAtleGFtcGxlLmNvbTEUMBIGA1UECgwLZXhhbXBsZS5jb20xFDASBgNVBAMMC2V4YW1wbGUuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC6GLkl5lDUZdHNDAojp5i24OoPlqrt5TGXJIPqAZYT1hQvJW5nv17MFDHrjmtEnmW4ACKEy0fAX80QWIcHunZSkbEGHb+NG/6oTi5RipXMvmHnfFnPJJ0AdtiLiPE478CV856gXekV4Xx5u3KrylcOgkpYsp0GMIQBDzleMUXlYQIDAQABo1AwTjAdBgNVHQ4EFgQUnP8vlYPGPL2n6ZzDYij2kMDC8wMwHwYDVR0jBBgwFoAUnP8vlYPGPL2n6ZzDYij2kMDC8wMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOBgQAlQGAl+b8Cpot1g+65lLLjVoY7APJPWLW0klKQNlMU0s4MU+71Y3ExUEOXDAZgKcFoavb1fEOGMwEf38NaJAy1e/l6VNuixXShffq20ymqHQxOG0q8ujeNkgZF9k6XDfn/QZ3AD0o/IrCT7UMc/0QsfgIjWYxwCvp2syApc5CYfQ==" ] }, "entityId": "https://idp.examle.com/saml/metadata", "singleSignOnService": { "url": "https://idp.examle.com/saml/sso", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data) def test_parse_multi_singing_certs(self): """ Tests the parse method of the OneLogin_Saml2_IdPMetadataParser Case: IdP metadata contains multiple signing certs and no encryption certs """ xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata_multi_signing_certs.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) expected_settings_json = """ { "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" }, "idp": { "singleLogoutService": { "url": "https://idp.examle.com/saml/slo", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "x509certMulti": { "signing": [ 
"MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==", 
"MIICZDCCAc2gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBPMQswCQYDVQQGEwJ1czEUMBIGA1UECAwLZXhhbXBsZS5jb20xFDASBgNVBAoMC2V4YW1wbGUuY29tMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xNzA0MTUxNjMzMThaFw0xODA0MTUxNjMzMThaME8xCzAJBgNVBAYTAnVzMRQwEgYDVQQIDAtleGFtcGxlLmNvbTEUMBIGA1UECgwLZXhhbXBsZS5jb20xFDASBgNVBAMMC2V4YW1wbGUuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC6GLkl5lDUZdHNDAojp5i24OoPlqrt5TGXJIPqAZYT1hQvJW5nv17MFDHrjmtEnmW4ACKEy0fAX80QWIcHunZSkbEGHb+NG/6oTi5RipXMvmHnfFnPJJ0AdtiLiPE478CV856gXekV4Xx5u3KrylcOgkpYsp0GMIQBDzleMUXlYQIDAQABo1AwTjAdBgNVHQ4EFgQUnP8vlYPGPL2n6ZzDYij2kMDC8wMwHwYDVR0jBBgwFoAUnP8vlYPGPL2n6ZzDYij2kMDC8wMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOBgQAlQGAl+b8Cpot1g+65lLLjVoY7APJPWLW0klKQNlMU0s4MU+71Y3ExUEOXDAZgKcFoavb1fEOGMwEf38NaJAy1e/l6VNuixXShffq20ymqHQxOG0q8ujeNkgZF9k6XDfn/QZ3AD0o/IrCT7UMc/0QsfgIjWYxwCvp2syApc5CYfQ==", "MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9j
rycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==" ] }, "entityId": "https://idp.examle.com/saml/metadata", "singleSignOnService": { "url": "https://idp.examle.com/saml/sso", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data) def test_parse_multi_same_signing_and_encrypt_cert(self): """ Tests the parse method of the OneLogin_Saml2_IdPMetadataParser Case: IdP metadata contains multiple signature cert and encrypt cert that is the same """ xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata_same_sign_and_encrypt_cert.xml')) data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) expected_settings_json = """ { "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" }, "idp": { "x509cert": 
"MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbTAeFw0xMzA2MDUxNzE2MjBaFw0xODA2MDUxNzE2MjBaMGcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQHDAxTYW50YSBNb25pY2ExETAPBgNVBAoMCE9uZUxvZ2luMRkwFwYDVQQDDBBhcHAub25lbG9naW4uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAse8rnep4qL2GmhH10pMQyJ2Jae+AQHyfgVjaQZ7Z0QQog5jX91vcJRSMi0XWJnUtOr6lF0dq1+yckjZ92wyLrH+7fvngNO1aV4Mjk9sTgf+iqMrae6y6fRxDt9PXrEFVjvd3vv7QTJf2FuIPy4vVP06Dt8EMkQIr8rmLmU0mTr1k2DkrdtdlCuNFTXuAu3QqfvNCRrRwfNObn9MP6JeOUdcGLJsBjGF8exfcN1SFzRF0JFr3dmOlx761zK5liD0T1sYWnDquatj/JD9fZMbKecBKni1NglH/LVd+b6aJUAr5LulERULUjLqYJRKW31u91/4Qazdo9tbvwqyFxaoUrwIDAQABo4HUMIHRMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFPWcXvQSlTXnzZD2xziuoUvrrDedMIGRBgNVHSMEgYkwgYaAFPWcXvQSlTXnzZD2xziuoUvrrDedoWukaTBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbYIBATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEFBQADggEBAB/8xe3rzqXQVxzHyAHuAuPa73ClDoL1cko0Fp8CGcqEIyj6Te9gx5z6wyfv+Lo8RFvBLlnB1lXqbC+fTGcVgG/4oKLJ5UwRFxInqpZPnOAudVNnd0PYOODn9FWs6u+OTIQIaIcPUv3MhB9lwHIJsTk/bs9xcru5TPyLIxLLd6ib/pRceKH2mTkzUd0DYk9CQNXXeoGx/du5B9nh3ClPTbVakRzl3oswgI5MQIphYxkW70SopEh4kOFSRE1ND31NNIq1YrXlgtkguQBFsZWuQOPR6cEwFZzP0tHTYbI839WgxX6hfhIUTUz6mLqq4+3P4BG3+1OXeVDg63y8Uh781sE=", "entityId": "https://app.onelogin.com/saml/metadata/383123", "singleSignOnService": { "url": "https://app.onelogin.com/trust/saml2/http-post/sso/383123", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings = json.loads(expected_settings_json) self.assertEqual(expected_settings, data) xml_idp_metadata_2 = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata_different_sign_and_encrypt_cert.xml')) data_2 = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata_2) expected_settings_json_2 = """ { "sp": { "NameIDFormat": 
"urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" }, "idp": { "x509certMulti": { "encryption": [ "MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==" ], "signing": [ 
"MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbTAeFw0xMzA2MDUxNzE2MjBaFw0xODA2MDUxNzE2MjBaMGcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQHDAxTYW50YSBNb25pY2ExETAPBgNVBAoMCE9uZUxvZ2luMRkwFwYDVQQDDBBhcHAub25lbG9naW4uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAse8rnep4qL2GmhH10pMQyJ2Jae+AQHyfgVjaQZ7Z0QQog5jX91vcJRSMi0XWJnUtOr6lF0dq1+yckjZ92wyLrH+7fvngNO1aV4Mjk9sTgf+iqMrae6y6fRxDt9PXrEFVjvd3vv7QTJf2FuIPy4vVP06Dt8EMkQIr8rmLmU0mTr1k2DkrdtdlCuNFTXuAu3QqfvNCRrRwfNObn9MP6JeOUdcGLJsBjGF8exfcN1SFzRF0JFr3dmOlx761zK5liD0T1sYWnDquatj/JD9fZMbKecBKni1NglH/LVd+b6aJUAr5LulERULUjLqYJRKW31u91/4Qazdo9tbvwqyFxaoUrwIDAQABo4HUMIHRMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFPWcXvQSlTXnzZD2xziuoUvrrDedMIGRBgNVHSMEgYkwgYaAFPWcXvQSlTXnzZD2xziuoUvrrDedoWukaTBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbYIBATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEFBQADggEBAB/8xe3rzqXQVxzHyAHuAuPa73ClDoL1cko0Fp8CGcqEIyj6Te9gx5z6wyfv+Lo8RFvBLlnB1lXqbC+fTGcVgG/4oKLJ5UwRFxInqpZPnOAudVNnd0PYOODn9FWs6u+OTIQIaIcPUv3MhB9lwHIJsTk/bs9xcru5TPyLIxLLd6ib/pRceKH2mTkzUd0DYk9CQNXXeoGx/du5B9nh3ClPTbVakRzl3oswgI5MQIphYxkW70SopEh4kOFSRE1ND31NNIq1YrXlgtkguQBFsZWuQOPR6cEwFZzP0tHTYbI839WgxX6hfhIUTUz6mLqq4+3P4BG3+1OXeVDg63y8Uh781sE=" ] }, "entityId": "https://app.onelogin.com/saml/metadata/383123", "singleSignOnService": { "url": "https://app.onelogin.com/trust/saml2/http-post/sso/383123", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } } } """ expected_settings_2 = json.loads(expected_settings_json_2) self.assertEqual(expected_settings_2, data_2) def test_merge_settings(self): """ Tests the merge_settings method of the OneLogin_Saml2_IdPMetadataParser """ with self.assertRaises(TypeError): settings_result = OneLogin_Saml2_IdPMetadataParser.merge_settings(None, {}) with 
self.assertRaises(TypeError): settings_result = OneLogin_Saml2_IdPMetadataParser.merge_settings({}, None) xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata.xml')) # Parse XML metadata. data = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) # Read base settings. settings = self.loadSettingsJSON() # Merge settings from XML metadata into base settings, # let XML metadata have priority if there are conflicting # attributes. settings_result = OneLogin_Saml2_IdPMetadataParser.merge_settings(settings, data) # Generate readable JSON representation: # print("%s" % json.dumps(settings_result, indent=2).replace(r'\n', r'\\n')) expected_settings_json = """ { "custom_base_path": "../../../tests/data/customPath/", "contactPerson": { "support": { "emailAddress": "support@example.com", "givenName": "support_name" }, "technical": { "emailAddress": "technical@example.com", "givenName": "technical_name" } }, "idp": { "singleSignOnService": { "url": "https://app.onelogin.com/trust/saml2/http-post/sso/383123", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "entityId": "https://app.onelogin.com/saml/metadata/383123", "singleLogoutService": { "url": "http://idp.example.com/SingleLogoutService.php" }, "x509cert": 
"MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbTAeFw0xMzA2MDUxNzE2MjBaFw0xODA2MDUxNzE2MjBaMGcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQHDAxTYW50YSBNb25pY2ExETAPBgNVBAoMCE9uZUxvZ2luMRkwFwYDVQQDDBBhcHAub25lbG9naW4uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAse8rnep4qL2GmhH10pMQyJ2Jae+AQHyfgVjaQZ7Z0QQog5jX91vcJRSMi0XWJnUtOr6lF0dq1+yckjZ92wyLrH+7fvngNO1aV4Mjk9sTgf+iqMrae6y6fRxDt9PXrEFVjvd3vv7QTJf2FuIPy4vVP06Dt8EMkQIr8rmLmU0mTr1k2DkrdtdlCuNFTXuAu3QqfvNCRrRwfNObn9MP6JeOUdcGLJsBjGF8exfcN1SFzRF0JFr3dmOlx761zK5liD0T1sYWnDquatj/JD9fZMbKecBKni1NglH/LVd+b6aJUAr5LulERULUjLqYJRKW31u91/4Qazdo9tbvwqyFxaoUrwIDAQABo4HUMIHRMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFPWcXvQSlTXnzZD2xziuoUvrrDedMIGRBgNVHSMEgYkwgYaAFPWcXvQSlTXnzZD2xziuoUvrrDedoWukaTBnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMU2FudGEgTW9uaWNhMREwDwYDVQQKDAhPbmVMb2dpbjEZMBcGA1UEAwwQYXBwLm9uZWxvZ2luLmNvbYIBATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEFBQADggEBAB/8xe3rzqXQVxzHyAHuAuPa73ClDoL1cko0Fp8CGcqEIyj6Te9gx5z6wyfv+Lo8RFvBLlnB1lXqbC+fTGcVgG/4oKLJ5UwRFxInqpZPnOAudVNnd0PYOODn9FWs6u+OTIQIaIcPUv3MhB9lwHIJsTk/bs9xcru5TPyLIxLLd6ib/pRceKH2mTkzUd0DYk9CQNXXeoGx/du5B9nh3ClPTbVakRzl3oswgI5MQIphYxkW70SopEh4kOFSRE1ND31NNIq1YrXlgtkguQBFsZWuQOPR6cEwFZzP0tHTYbI839WgxX6hfhIUTUz6mLqq4+3P4BG3+1OXeVDg63y8Uh781sE=" }, "sp": { "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress", "entityId": "http://stuff.com/endpoints/metadata.php", "assertionConsumerService": { "url": "http://stuff.com/endpoints/endpoints/acs.php" }, "singleLogoutService": { "url": "http://stuff.com/endpoints/endpoints/sls.php" } }, "security": { "wantAssertionsSigned": false, "authnRequestsSigned": false, "signMetadata": false }, "debug": false, "organization": { "en-US": { "displayname": "SP test", "url": "http://sp.example.com", "name": "sp_test" } }, "strict": false } """ expected_settings = 
json.loads(expected_settings_json) self.assertEqual(expected_settings, settings_result) # Commute merge operation. As the order determines which settings # dictionary has priority, here we expect a different result. settings_result2 = OneLogin_Saml2_IdPMetadataParser.merge_settings(data, settings) expected_settings2_json = """ { "debug": false, "idp": { "singleLogoutService": { "url": "http://idp.example.com/SingleLogoutService.php" }, "singleSignOnService": { "url": "http://idp.example.com/SSOService.php", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "entityId": "http://idp.example.com/", "x509cert": "MIICgTCCAeoCCQCbOlrWDdX7FTANBgkqhkiG9w0BAQUFADCBhDELMAkGA1UEBhMCTk8xGDAWBgNVBAgTD0FuZHJlYXMgU29sYmVyZzEMMAoGA1UEBxMDRm9vMRAwDgYDVQQKEwdVTklORVRUMRgwFgYDVQQDEw9mZWlkZS5lcmxhbmcubm8xITAfBgkqhkiG9w0BCQEWEmFuZHJlYXNAdW5pbmV0dC5ubzAeFw0wNzA2MTUxMjAxMzVaFw0wNzA4MTQxMjAxMzVaMIGEMQswCQYDVQQGEwJOTzEYMBYGA1UECBMPQW5kcmVhcyBTb2xiZXJnMQwwCgYDVQQHEwNGb28xEDAOBgNVBAoTB1VOSU5FVFQxGDAWBgNVBAMTD2ZlaWRlLmVybGFuZy5ubzEhMB8GCSqGSIb3DQEJARYSYW5kcmVhc0B1bmluZXR0Lm5vMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDivbhR7P516x/S3BqKxupQe0LONoliupiBOesCO3SHbDrl3+q9IbfnfmE04rNuMcPsIxB161TdDpIesLCn7c8aPHISKOtPlAeTZSnb8QAu7aRjZq3+PbrP5uW3TcfCGPtKTytHOge/OlJbo078dVhXQ14d1EDwXJW1rRXuUt4C8QIDAQABMA0GCSqGSIb3DQEBBQUAA4GBACDVfp86HObqY+e8BUoWQ9+VMQx1ASDohBjwOsg2WykUqRXF+dLfcUH9dWR63CtZIKFDbStNomPnQz7nbK+onygwBspVEbnHuUihZq3ZUdmumQqCw4Uvs/1Uvq3orOo/WJVhTyvLgFVK2QarQ4/67OZfHd7R+POBXhophSMv1ZOo" }, "security": { "authnRequestsSigned": false, "wantAssertionsSigned": false, "signMetadata": false }, "contactPerson": { "technical": { "emailAddress": "technical@example.com", "givenName": "technical_name" }, "support": { "emailAddress": "support@example.com", "givenName": "support_name" } }, "strict": false, "sp": { "singleLogoutService": { "url": "http://stuff.com/endpoints/endpoints/sls.php" }, "assertionConsumerService": { "url": "http://stuff.com/endpoints/endpoints/acs.php" }, "entityId": 
"http://stuff.com/endpoints/metadata.php", "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" }, "custom_base_path": "../../../tests/data/customPath/", "organization": { "en-US": { "displayname": "SP test", "url": "http://sp.example.com", "name": "sp_test" } } } """ expected_settings2 = json.loads(expected_settings2_json) self.assertEqual(expected_settings2, settings_result2) # Test merging multiple certs xml_idp_metadata = self.file_contents(join(self.data_path, 'metadata', 'idp_metadata_multi_certs.xml')) data3 = OneLogin_Saml2_IdPMetadataParser.parse(xml_idp_metadata) settings_result3 = OneLogin_Saml2_IdPMetadataParser.merge_settings(settings, data3) expected_settings3_json = """ { "debug": false, "strict": false, "custom_base_path": "../../../tests/data/customPath/", "sp": { "singleLogoutService": { "url": "http://stuff.com/endpoints/endpoints/sls.php" }, "assertionConsumerService": { "url": "http://stuff.com/endpoints/endpoints/acs.php" }, "entityId": "http://stuff.com/endpoints/metadata.php", "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" }, "idp": { "singleLogoutService": { "url": "https://idp.examle.com/saml/slo", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "x509certMulti": { "encryption": [ 
"MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==" ], "signing": [ 
"MIIEZTCCA02gAwIBAgIUPyy/A3bZAZ4m28PzEUUoT7RJhxIwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UEBhMCVVMxKzApBgNVBAoMIk9uZUxvZ2luIFRlc3QgKHNnYXJjaWEtdXMtcHJlcHJvZCkxFTATBgNVBAsMDE9uZUxvZ2luIElkUDEfMB0GA1UEAwwWT25lTG9naW4gQWNjb3VudCA4OTE0NjAeFw0xNjA4MDQyMjI5MzdaFw0yMTA4MDUyMjI5MzdaMHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDN6iqQGcLOCglNO42I2rkzE05UXSiMXT6c8ALThMMiaDw6qqzo3sd/tKK+NcNKWLIIC8TozWVyh5ykUiVZps+08xil7VsTU7E+wKu3kvmOsvw2wlRwtnoKZJwYhnr+RkBa+h1r3ZYUgXm1ZPeHMKj1g18KaWz9+MxYL6BhKqrOzfW/P2xxVRcFH7/pq+ZsDdgNzD2GD+apzY4MZyZj/N6BpBWJ0GlFsmtBegpbX3LBitJuFkk5L4/U/jjF1AJa3boBdCUVfATqO5G03H4XS1GySjBIRQXmlUF52rLjg6xCgWJ30/+t1X+IHLJeixiQ0vxyh6C4/usCEt94cgD1r8ADAgMBAAGjgfIwge8wDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUPW0DcH0G3IwynWgi74co4wZ6n7gwga8GA1UdIwSBpzCBpIAUPW0DcH0G3IwynWgi74co4wZ6n7ihdqR0MHIxCzAJBgNVBAYTAlVTMSswKQYDVQQKDCJPbmVMb2dpbiBUZXN0IChzZ2FyY2lhLXVzLXByZXByb2QpMRUwEwYDVQQLDAxPbmVMb2dpbiBJZFAxHzAdBgNVBAMMFk9uZUxvZ2luIEFjY291bnQgODkxNDaCFD8svwN22QGeJtvD8xFFKE+0SYcSMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQUFAAOCAQEAQhB4q9jrycwbHrDSoYR1X4LFFzvJ9Us75wQquRHXpdyS9D6HUBXMGI6ahPicXCQrfLgN8vzMIiqZqfySXXv/8/dxe/X4UsWLYKYJHDJmxXD5EmWTa65chjkeP1oJAc8f3CKCpcP2lOBTthbnk2fEVAeLHR4xNdQO0VvGXWO9BliYPpkYqUIBvlm+Fg9mF7AM/Uagq2503XXIE1Lq//HON68P10vNMwLSKOtYLsoTiCnuIKGJqG37MsZVjQ1ZPRcO+LSLkq0i91gFxrOrVCrgztX4JQi5XkvEsYZGIXXjwHqxTVyt3adZWQO0LPxPqRiUqUzyhDhLo/xXNrHCu4VbMw==", 
"MIICZDCCAc2gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBPMQswCQYDVQQGEwJ1czEUMBIGA1UECAwLZXhhbXBsZS5jb20xFDASBgNVBAoMC2V4YW1wbGUuY29tMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xNzA0MTUxNjMzMThaFw0xODA0MTUxNjMzMThaME8xCzAJBgNVBAYTAnVzMRQwEgYDVQQIDAtleGFtcGxlLmNvbTEUMBIGA1UECgwLZXhhbXBsZS5jb20xFDASBgNVBAMMC2V4YW1wbGUuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC6GLkl5lDUZdHNDAojp5i24OoPlqrt5TGXJIPqAZYT1hQvJW5nv17MFDHrjmtEnmW4ACKEy0fAX80QWIcHunZSkbEGHb+NG/6oTi5RipXMvmHnfFnPJJ0AdtiLiPE478CV856gXekV4Xx5u3KrylcOgkpYsp0GMIQBDzleMUXlYQIDAQABo1AwTjAdBgNVHQ4EFgQUnP8vlYPGPL2n6ZzDYij2kMDC8wMwHwYDVR0jBBgwFoAUnP8vlYPGPL2n6ZzDYij2kMDC8wMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOBgQAlQGAl+b8Cpot1g+65lLLjVoY7APJPWLW0klKQNlMU0s4MU+71Y3ExUEOXDAZgKcFoavb1fEOGMwEf38NaJAy1e/l6VNuixXShffq20ymqHQxOG0q8ujeNkgZF9k6XDfn/QZ3AD0o/IrCT7UMc/0QsfgIjWYxwCvp2syApc5CYfQ==" ] }, "entityId": "https://idp.examle.com/saml/metadata", "singleSignOnService": { "url": "https://idp.examle.com/saml/sso", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } }, "security": { "authnRequestsSigned": false, "wantAssertionsSigned": false, "signMetadata": false }, "contactPerson": { "technical": { "emailAddress": "technical@example.com", "givenName": "technical_name" }, "support": { "emailAddress": "support@example.com", "givenName": "support_name" } }, "organization": { "en-US": { "displayname": "SP test", "url": "http://sp.example.com", "name": "sp_test" } } } """ expected_settings3 = json.loads(expected_settings3_json) self.assertEqual(expected_settings3, settings_result3) if __name__ == '__main__': runner = unittest.TextTestRunner() unittest.main(testRunner=runner)
78.516975
1,811
0.778926
2,811
50,879
13.930274
0.144788
0.019255
0.032943
0.010726
0.822335
0.811048
0.800373
0.786021
0.771388
0.761454
0
0.079151
0.154484
50,879
647
1,812
78.638331
0.831098
0.046758
0
0.546845
0
0.10325
0.792281
0.591691
0
1
0
0
0.068834
1
0.024857
false
0
0.015296
0
0.051625
0
0
0
1
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
11
6b32df9f64c8c19fb4c9cf60f29c0e1a18804d26
6,341
py
Python
functions/getter.py
mramirid/Whatsapp-Bot-Covid19
29472c9d9918f819296a07d1436b0988e5bbe73a
[ "MIT" ]
null
null
null
functions/getter.py
mramirid/Whatsapp-Bot-Covid19
29472c9d9918f819296a07d1436b0988e5bbe73a
[ "MIT" ]
null
null
null
functions/getter.py
mramirid/Whatsapp-Bot-Covid19
29472c9d9918f819296a07d1436b0988e5bbe73a
[ "MIT" ]
null
null
null
global mysql def init_connection(new_mysql): global mysql mysql = new_mysql ################### Nasional ################### def get_nasional(): today = get_today_nasional() yesterday = get_yesterday_nasional() # Memgambil index array agar saat pemanggilan variabel mudah, tidak today[0] dst if len(today) > 0: positif = 1 sembuh = 2 meninggal = 3 perawatan = 4 datetime = 6 # Penambahan masing2 kasus positif, sembuh & meninggal dari kemarin selisih_positif = today[positif] - yesterday[positif] selisih_sembuh = today[sembuh] - yesterday[sembuh] selisih_meninggal = today[meninggal] - yesterday[meninggal] selisih_perawatan = today[perawatan] - yesterday[perawatan] # Selisih total kasus dari kemarin total_yesterday = yesterday[positif] + \ yesterday[sembuh] + yesterday[meninggal] total_today = today[positif] + today[sembuh] + today[meninggal] selisih_total = total_today - total_yesterday tempTime = str(today[datetime]) readableTime = tempTime[11:16] message = '' if selisih_total > 0: message += 'Statistik kasus di Indonesia\n\n' message += '- Positif: {} (+{})\n'.format( today[positif], abs(selisih_positif)) message += '- Sembuh: {} (+{})\n'.format( today[sembuh], abs(selisih_sembuh)) message += '- Meninggal: {} (+{})\n'.format( today[meninggal], abs(selisih_meninggal)) message += '- Dalam perawatan: {} (+{})\n\n'.format( today[perawatan], abs(selisih_perawatan)) else: message += 'Statistik kasus di Indonesia\n\n' message += '- Positif: {}\n'.format(today[positif]) message += '- Sembuh: {}\n'.format(today[sembuh]) message += '- Meninggal: {}\n'.format(today[meninggal]) message += '- Dalam perawatan: {}\n\n'.format(today[perawatan]) message += 'Tetap jaga kesehatan dan apabila memungkinkan #DirumahAja\n\n' message += 'Pembaruan terakhir pada {}'.format(readableTime) else: return False return message def get_today_nasional(): cur = mysql.connection.cursor() cur.execute("SELECT * FROM nasional WHERE DATE(created_at) = CURDATE()") data = cur.fetchone() cur.close() return 
data def get_yesterday_nasional(): cur = mysql.connection.cursor() cur.execute("SELECT * FROM nasional WHERE DATE(created_at) = CURDATE()-1") data = cur.fetchone() cur.close() return data ################### End of Nasional ################### ################### Provinsi ################### def get_prov_byname(name): today = get_today_prov_byname(name) yesterday = get_yesterday_prov_byname(name) if len(today) > 0: # Index, mempermudah saja datetime = 0 nama_provinsi = 1 positif = 2 sembuh = 3 perawatan = 4 meninggal = 5 # Penambahan masing2 kasus positif, sembuh & meninggal dari kemarin selisih_positif = today[positif] - yesterday[positif] selisih_sembuh = today[sembuh] - yesterday[sembuh] selisih_meninggal = today[meninggal] - yesterday[meninggal] selisih_perawatan = today[perawatan] - yesterday[perawatan] # Selisih total kasus dari kemarin total_yesterday = yesterday[positif] + \ yesterday[sembuh] + yesterday[meninggal] total_today = today[positif] + today[sembuh] + today[meninggal] selisih_total = total_today - total_yesterday tempTime = str(today[datetime]) readableTime = tempTime[11:16] message = '' if selisih_total > 0: message += 'Statistik kasus di {}\n\n'.format(today[nama_provinsi]) message += '- Positif: {} (+{})\n'.format( today[positif], abs(selisih_positif)) message += '- Sembuh: {} (+{})\n'.format( today[sembuh], abs(selisih_sembuh)) message += '- Meninggal: {} (+{})\n'.format( today[meninggal], abs(selisih_meninggal)) message += '- Dalam perawatan: {} (+{})\n\n'.format( today[perawatan], abs(selisih_perawatan)) else: message += 'Statistik kasus di {}\n\n'.format(today[nama_provinsi]) message += '- Positif: {}\n'.format(today[positif]) message += '- Sembuh: {}\n'.format(today[sembuh]) message += '- Meninggal: {}\n'.format(today[meninggal]) message += '- Dalam perawatan: {}\n\n'.format(today[perawatan]) message += 'Tetap jaga kesehatan dan apabila memungkinkan #DirumahAja\n\n' message += 'Pembaruan terakhir pada {}'.format(readableTime) else: return 
False return message def get_today_prov_byname(name): cur = mysql.connection.cursor() cur.execute('''SELECT pengambilan_provinsi.updated_at, nama_provinsi, positif, sembuh, dalam_perawatan, meninggal FROM pengambilan_provinsi LEFT JOIN detail_pengambilan_provinsi ON pengambilan_provinsi.id = detail_pengambilan_provinsi.id_pengambilan_provinsi WHERE DATE(pengambilan_provinsi.created_at) = CURDATE() AND nama_provinsi LIKE '%{}%\''''.format(name)) data = cur.fetchone() cur.close() return data def get_yesterday_prov_byname(name): cur = mysql.connection.cursor() cur.execute('''SELECT pengambilan_provinsi.updated_at, nama_provinsi, positif, sembuh, dalam_perawatan, meninggal FROM pengambilan_provinsi LEFT JOIN detail_pengambilan_provinsi ON pengambilan_provinsi.id = detail_pengambilan_provinsi.id_pengambilan_provinsi WHERE DATE(pengambilan_provinsi.created_at) = CURDATE()-1 AND nama_provinsi LIKE '%{}%\''''.format(name)) data = cur.fetchone() cur.close() return data ################### End of Provinsi ###################
35.824859
121
0.577985
620
6,341
5.767742
0.158065
0.035235
0.060403
0.021812
0.888143
0.87472
0.87472
0.87472
0.87472
0.862696
0
0.006211
0.289071
6,341
176
122
36.028409
0.787045
0.055669
0
0.80916
0
0
0.306108
0.058339
0
0
0
0
0
1
0.053435
false
0
0
0
0.114504
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
860f893d646dbd52965b39832b915dbb5940b46c
8,513
py
Python
options/meta_options.py
Wanggcong/SolutionSimilarityLearning
26279b61686b3c34745c369b2cc4175c71c55403
[ "MIT" ]
7
2019-12-23T02:37:27.000Z
2020-09-05T08:08:22.000Z
options/meta_options.py
Wanggcong/SolutionSimilarityLearning
26279b61686b3c34745c369b2cc4175c71c55403
[ "MIT" ]
null
null
null
options/meta_options.py
Wanggcong/SolutionSimilarityLearning
26279b61686b3c34745c369b2cc4175c71c55403
[ "MIT" ]
null
null
null
import argparse import os class MetaOptions(): def __init__(self,parser,dataset_name): """Reset the class; indicates the class hasn't been initailized""" self.parser = parser self.dataset_name = dataset_name def initialize(self): self.parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') def initialize_datasets(self): """Define the common options that are used in both training and test.""" # basic parameters self.initialize() if self.dataset_name == 'mnist': self.parser.add_argument('--root-path', type=str, default='/media/data2/anonymous/projects/LearnableParameterSimilarity/weights/mnist', metavar='RP', help='root path for weights') self.parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for training (default: 64)') self.parser.add_argument('--test-batch-size', type=int, default=1, metavar='N', help='input batch size for testing (default: 1000)') self.parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 10)') self.parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.01)') self.parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.5)') self.parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') self.parser.add_argument('--not-save-model', action='store_true', default=True, help='For Saving the current Model') self.parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)') self.parser.add_argument('--meta-model', type=str, default='cifar_mlp', metavar='M', help='meta model type') self.parser.add_argument('--step1', default=30, type=int, metavar='N', help='step1 lr') self.parser.add_argument('--log-file', type=str, default='', metavar='M', help='log file') 
self.parser.add_argument('--selected-layers', type=str, default='0', metavar='M', help='selected layers') self.parser.add_argument('--cls-or-retr', action='store_true', help='True for classification, False for retrieval.') elif self.dataset_name == 'cifar100' or self.dataset_name == 'TinyImageNet': self.parser.add_argument('--root-path', type=str, default='/media/data2/anonymous/projects/LearnableParameterSimilarity/weights/cifar100_100', metavar='RP', help='root path for weights') self.parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for training (default: 64)') self.parser.add_argument('--test-batch-size', type=int, default=1, metavar='N', help='input batch size for testing (default: 1000)') self.parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 10)') self.parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.01)') self.parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.5)') self.parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') self.parser.add_argument('--not-save-model', action='store_true', default=True, help='For Saving the current Model') self.parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)') self.parser.add_argument('--meta-model', type=str, default='cifar_mlp', metavar='M', help='meta model type') self.parser.add_argument('--target-model', type=str, default='cifar_mlp', metavar='M', help='target model type') self.parser.add_argument('--log-file', type=str, default='', metavar='M', help='log file') self.parser.add_argument('--step1', default=30, type=int, metavar='N', help='step1 lr') self.parser.add_argument('--model-path', type=str, default='v1', metavar='M', help='model path') 
self.parser.add_argument('--selected-layers', type=str, default='0', metavar='M', help='selected layers') self.parser.add_argument('--cls-or-retr', action='store_true', help='True for classification, False for retrieval.') else: self.parser.add_argument('--root-path', type=str, default='/media/data2/anonymous/projects/LearnableParameterSimilarity/weights/cifar100_rnn_v1', metavar='RP', help='root path for weights') self.parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for training (default: 64)') self.parser.add_argument('--test-batch-size', type=int, default=1, metavar='N', help='input batch size for testing (default: 1000)') self.parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 10)') self.parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.01)') self.parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.5)') self.parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') self.parser.add_argument('--not-save-model', action='store_true', default=True, help='For Saving the current Model') self.parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)') self.parser.add_argument('--meta-model', type=str, default='cifar_mlp', metavar='M', help='meta model type') self.parser.add_argument('--log-file', type=str, default='', metavar='M', help='log file') self.parser.add_argument('--step1', default=30, type=int, metavar='N', help='step1 lr') self.parser.add_argument('--model-path', type=str, default='v1', metavar='M', help='model path') self.parser.add_argument('--selected-layers', type=str, default='0', metavar='M', help='selected layers') self.parser.add_argument('--cls-or-retr', action='store_true', help='True for classification, 
False for retrieval.')
74.026087
163
0.513685
903
8,513
4.765227
0.140642
0.11155
0.138973
0.224495
0.898211
0.893098
0.893098
0.893098
0.893098
0.884034
0
0.022649
0.351697
8,513
114
164
74.675439
0.757021
0.017033
0
0.82243
0
0
0.271746
0.028994
0
0
0
0
0
1
0.028037
false
0
0.018692
0
0.056075
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
862e76885428e343b1cd6f92a768d1a129846329
6,038
py
Python
tests/utils/file_utils/test_safe_file_write.py
Purg/SMQTK
705a2b2979935ed129aac7db578571c4ae1343e7
[ "BSD-3-Clause" ]
1
2021-04-25T16:53:50.000Z
2021-04-25T16:53:50.000Z
tests/utils/file_utils/test_safe_file_write.py
Purg/SMQTK
705a2b2979935ed129aac7db578571c4ae1343e7
[ "BSD-3-Clause" ]
3
2021-09-08T02:17:49.000Z
2022-03-12T00:40:33.000Z
tests/utils/file_utils/test_safe_file_write.py
Purg/SMQTK
705a2b2979935ed129aac7db578571c4ae1343e7
[ "BSD-3-Clause" ]
null
null
null
import mock import unittest from smqtk.utils.file import safe_file_write class TestSafeFileWrite (unittest.TestCase): """ Tests for the ``smqtk.utils.file.safe_file_write`` function. Mocking out underlying function that would have filesystem side effects. """ @mock.patch('smqtk.utils.file.safe_create_dir') @mock.patch('smqtk.utils.file.os.rename') @mock.patch('smqtk.utils.file.os.remove') @mock.patch('smqtk.utils.file.tempfile.NamedTemporaryFile') def test_safe_file_write_relative_simple( self, m_NTF, m_remove, m_rename, m_scd): # Experimental filepath and content. fp = 'bar.txt' expected_bytes = 'hello world' # Mock return for temp file creation so we can check os.* calls. m_file = m_NTF.return_value test_tmp_fp = 'temp fp' m_file.name = test_tmp_fp safe_file_write(fp, expected_bytes) m_scd.assert_called_once_with('') m_NTF.assert_called_once_with(suffix='.txt', prefix='bar.', dir='', delete=False) m_file.write.assert_called_once_with(expected_bytes) m_file.__exit__.assert_called_once_with(None, None, None) self.assertEqual(m_remove.call_count, 0) m_rename.assert_called_once_with(test_tmp_fp, fp) @mock.patch('smqtk.utils.file.safe_create_dir') @mock.patch('smqtk.utils.file.os.rename') @mock.patch('smqtk.utils.file.os.remove') @mock.patch('smqtk.utils.file.tempfile.NamedTemporaryFile') def test_safe_file_write_relative_subdir( self, m_NTF, m_remove, m_rename, m_scd): # Experimental filepath and content. fp = 'foo/other/bar.txt' expected_bytes = 'hello world' # Mock return for temp file creation so we can check os.* calls. 
m_file = m_NTF.return_value test_tmp_fp = 'temp fp' m_file.name = test_tmp_fp safe_file_write(fp, expected_bytes) m_scd.assert_called_once_with('foo/other') m_NTF.assert_called_once_with(suffix='.txt', prefix='bar.', dir='foo/other', delete=False) m_file.write.assert_called_once_with(expected_bytes) m_file.__exit__.assert_called_once_with(None, None, None) self.assertEqual(m_remove.call_count, 0) m_rename.assert_called_once_with(test_tmp_fp, fp) @mock.patch('smqtk.utils.file.safe_create_dir') @mock.patch('smqtk.utils.file.os.rename') @mock.patch('smqtk.utils.file.os.remove') @mock.patch('smqtk.utils.file.tempfile.NamedTemporaryFile') def test_safe_file_write_custom_tmp_dir( self, m_NTF, m_remove, m_rename, m_scd): # Experimental filepath and content. fp = 'foo/other/bar.txt' expected_bytes = 'hello world' custom_tmp_dir = '/some/other/directory' # Mock return for temp file creation so we can check os.* calls. m_file = m_NTF.return_value test_tmp_fp = 'temp fp' m_file.name = test_tmp_fp safe_file_write(fp, expected_bytes, custom_tmp_dir) m_scd.assert_called_once_with('foo/other') m_NTF.assert_called_once_with(suffix='.txt', prefix='bar.', dir=custom_tmp_dir, delete=False) m_file.write.assert_called_once_with(expected_bytes) m_file.__exit__.assert_called_once_with(None, None, None) self.assertEqual(m_remove.call_count, 0) m_rename.assert_called_once_with(test_tmp_fp, fp) @mock.patch('smqtk.utils.file.safe_create_dir') @mock.patch('smqtk.utils.file.os.rename') @mock.patch('smqtk.utils.file.os.remove') @mock.patch('smqtk.utils.file.tempfile.NamedTemporaryFile') def test_safe_file_write_absolute( self, m_NTF, m_remove, m_rename, m_scd): # Experimental filepath and content. fp = '/some/absolute/dir/bar.txt' expected_bytes = 'hello world' # Mock return for temp file creation so we can check os.* calls. 
m_file = m_NTF.return_value test_tmp_fp = 'temp fp' m_file.name = test_tmp_fp safe_file_write(fp, expected_bytes) m_scd.assert_called_once_with('/some/absolute/dir') m_NTF.assert_called_once_with(suffix='.txt', prefix='bar.', dir='/some/absolute/dir', delete=False) m_file.write.assert_called_once_with(expected_bytes) m_file.__exit__.assert_called_once_with(None, None, None) self.assertEqual(m_remove.call_count, 0) m_rename.assert_called_once_with(test_tmp_fp, fp) @mock.patch('smqtk.utils.file.safe_create_dir') @mock.patch('smqtk.utils.file.os.rename') @mock.patch('smqtk.utils.file.os.remove') @mock.patch('smqtk.utils.file.tempfile.NamedTemporaryFile') def test_safe_file_write_raising_write( self, m_NTF, m_remove, m_rename, m_scd): # Test for what happens when file.write raises an exception. # Experimental filepath and content. fp = 'bar.txt' expected_bytes = 'hello world' # Mock return for temp file creation so we can check os.* calls. m_file = m_NTF.return_value test_tmp_fp = 'temp fp' m_file.name = test_tmp_fp # Mock return from write simulating not all bytes being written. m_file.write.side_effect = OSError self.assertRaises( OSError, safe_file_write, fp, expected_bytes ) m_scd.assert_called_once_with('') m_NTF.assert_called_once_with(suffix='.txt', prefix='bar.', dir='', delete=False) m_file.write.assert_called_once_with(expected_bytes) # Remove should now be called on temp file path self.assertEqual(m_remove.call_count, 1) m_remove.assert_called_once_with(test_tmp_fp) self.assertEqual(m_file.__exit__.call_count, 1) # Rename should no longer be called. self.assertEqual(m_rename.call_count, 0)
40.52349
77
0.662637
845
6,038
4.413018
0.126627
0.077233
0.102977
0.128721
0.828104
0.828104
0.819791
0.812014
0.812014
0.805042
0
0.001516
0.235343
6,038
148
78
40.797297
0.806151
0.137132
0
0.761905
0
0
0.179254
0.132702
0
0
0
0
0.304762
1
0.047619
false
0
0.028571
0
0.085714
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
86643ebec6d6dd6c7a0c2a878a60211ba8097fa4
3,820
py
Python
Product Manager/style.py
Vatsalgarg2000/Product_Manager
c129461233d1a394a2cf3365186994ca414bd74e
[ "Apache-2.0" ]
null
null
null
Product Manager/style.py
Vatsalgarg2000/Product_Manager
c129461233d1a394a2cf3365186994ca414bd74e
[ "Apache-2.0" ]
null
null
null
Product Manager/style.py
Vatsalgarg2000/Product_Manager
c129461233d1a394a2cf3365186994ca414bd74e
[ "Apache-2.0" ]
null
null
null
# Qt style-sheet snippets for the Product Manager UI widgets.
#
# NOTE(review): reconstructed from a whitespace-mangled source. The exact
# line breaks inside the original style strings are unknown, but Qt style
# sheets ignore insignificant whitespace, so rendering is unaffected.
# The many near-identical functions are kept (same public API) but now
# delegate to three private templates.


def _group_box(background, font):
    # Shared QGroupBox template: only background colour and font vary.
    return """
    QGroupBox{
        background-color:%s;
        font:%s;
        color:white;
        border:2px solid gray;
        border-radius:15px;
    }
    """ % (background, font)


def _push_button(background):
    # Shared QPushButton template: only the background colour varies.
    return """
    QPushButton{
        background-color:%s;
        border-style:outset;
        border-width:2px;
        border-radius:10px;
        border-color:beige;
        font:12px;
        padding:6px;
        min-width:6em;
    }
    """ % background


def _frame(background, font):
    # Shared QFrame template used by every top/bottom frame style.
    return """
    QFrame{
        background-color:%s;
        font:%s;
    }
    """ % (background, font)


def searchBoxStyle():
    return _group_box('#9bc9ff', '15pt Times Bold')


def listBoxStyle():
    return _group_box('#fcc324', '15pt Arial Bold')


def searchButtonStyle():
    return _push_button('#fcc324')


def listButtonStyle():
    return _push_button('#9bc9ff')


def productBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def productTopFrame():
    return _frame('white', '20pt Times Bold')


def memberTopFrame():
    return _frame('white', '20pt Times Bold')


def memberBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def sellProductTopFrame():
    return _frame('white', '20pt Times Bold')


def sellProductBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def confirmProductTopFrame():
    return _frame('white', '20pt Times Bold')


def confirmProductBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def addMemberTopFrame():
    return _frame('white', '20pt Times Bold')


def addMemberBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def addProductTopFrame():
    return _frame('white', '20pt Times Bold')


def addProductBottomFrame():
    return _frame('#fcc324', '15pt Times Bold')


def memberSearchBoxStyle():
    return _group_box('#9bc9ff', '15pt Times Bold')
22.209302
46
0.393194
244
3,820
6.155738
0.20082
0.169774
0.175766
0.215712
0.73036
0.711052
0.711052
0.711052
0.711052
0.711052
0
0.045109
0.518325
3,820
172
47
22.209302
0.771196
0
0
0.75
0
0
0.792055
0.113151
0
0
0
0
0
1
0.121429
true
0
0
0.121429
0.242857
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
1
0
0
1
0
0
0
11
86d4c5029690fffd504ce9f5d345ba75609792f1
11,792
py
Python
tests/pyformance_test.py
Starz-Github/signalfx-python
2d07b0f0ffb91ccba7071eafab306673c3d71cb7
[ "Apache-2.0" ]
41
2015-06-17T16:44:25.000Z
2021-08-16T15:12:44.000Z
tests/pyformance_test.py
Starz-Github/signalfx-python
2d07b0f0ffb91ccba7071eafab306673c3d71cb7
[ "Apache-2.0" ]
74
2015-05-07T19:36:34.000Z
2021-12-29T15:29:33.000Z
tests/pyformance_test.py
Starz-Github/signalfx-python
2d07b0f0ffb91ccba7071eafab306673c3d71cb7
[ "Apache-2.0" ]
46
2015-05-07T23:23:07.000Z
2022-02-28T20:55:14.000Z
#!/usr/bin/env python
# Copyright (C) 2018 SignalFx, Inc. All rights reserved.
#
# NOTE(review): reconstructed from a whitespace-mangled source (the file
# had been collapsed onto single physical lines and no longer parsed).

from pyformance.registry import get_qualname

import os
import sys
import unittest

# Make the package under test importable when running from the tests dir.
sys.path.insert(0, os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..'))

# import the signalfx pyformance library
import signalfx.pyformance as pyf  # noqa


class TestPyformance(unittest.TestCase):
    """Exercise the SignalFx pyformance wrappers: per-registry and global
    gauges/counters/histograms/meters/timers, with and without dimensions,
    plus the call-counting/timing decorators."""

    def tearDown(self):
        # Reset the global registry so tests do not leak metrics into
        # each other.
        pyf.clear()

    def test_gauge(self):
        reg = pyf.MetricsRegistry()
        reg.gauge('test_gauge').set_value(1)
        reg.gauge('test_gauge_with_dim', default=3,
                  gauge_dim='hello_gauge').set_value(2)
        self.assertEqual(
            reg.metadata.get_metadata(
                'gauge_dim=hello_gauge.test_gauge_with_dim'),
            {
                'dimensions': {'gauge_dim': 'hello_gauge'},
                'metric': 'test_gauge_with_dim',
            })
        self.assertEqual(reg.dump_metrics(), {
            'test_gauge': {'value': 1},
            'gauge_dim=hello_gauge.test_gauge_with_dim': {'value': 2},
        })
        reg.clear()
        self.assertEqual(reg.dump_metrics(), {})
        self.assertEqual(len(reg.metadata._metadata), 0)

    def test_global_gauge(self):
        pyf.gauge('test_gauge').set_value(1)
        pyf.gauge('test_gauge_with_dim', default=3,
                  gauge_dim='hello_gauge').set_value(2)
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'gauge_dim=hello_gauge.test_gauge_with_dim'),
            {
                'dimensions': {'gauge_dim': 'hello_gauge'},
                'metric': 'test_gauge_with_dim',
            })
        self.assertEqual(pyf.dump_metrics(), {
            'test_gauge': {'value': 1},
            'gauge_dim=hello_gauge.test_gauge_with_dim': {'value': 2},
        })

    def test_counter(self):
        reg = pyf.MetricsRegistry()
        reg.counter('test_counter').inc()
        reg.counter('test_counter_with_dim',
                    counter_dim='hello_counter').inc()
        self.assertEqual(
            reg.metadata.get_metadata(
                'counter_dim=hello_counter.test_counter_with_dim'),
            {
                'dimensions': {'counter_dim': 'hello_counter'},
                'metric': 'test_counter_with_dim',
            })
        self.assertEqual(reg.dump_metrics(), {
            'test_counter': {'count': 1},
            'counter_dim=hello_counter.test_counter_with_dim': {'count': 1},
        })
        reg.clear()
        self.assertEqual(reg.dump_metrics(), {})
        self.assertEqual(len(reg.metadata._metadata), 0)

    def test_global_counter(self):
        pyf.counter('test_counter').inc()
        pyf.counter('test_counter_with_dim',
                    counter_dim='hello_counter').inc()
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'counter_dim=hello_counter.test_counter_with_dim'),
            {
                'dimensions': {'counter_dim': 'hello_counter'},
                'metric': 'test_counter_with_dim',
            })
        self.assertEqual(pyf.dump_metrics(), {
            'test_counter': {'count': 1},
            'counter_dim=hello_counter.test_counter_with_dim': {'count': 1},
        })

    def test_counter_decorator(self):
        @pyf.count_calls
        def callme():
            pass
        qcallme = get_qualname(callme)

        @pyf.count_calls_with_dims(counter_dim='hello_counter')
        def callme_with_dims():
            pass
        qcallme_with_dims = get_qualname(callme_with_dims)

        callme()
        callme_with_dims()
        # NOTE(review): the mangled source made the extent of this
        # Python-2-only guard ambiguous; the metadata assertion is placed
        # inside it and the dump_metrics assertion outside — confirm
        # against upstream history.
        if sys.version_info[0] < 3:
            self.assertEqual(
                pyf.global_registry().metadata.get_metadata(
                    'counter_dim=hello_counter.{0}_calls'.format(
                        qcallme_with_dims)),
                {
                    'dimensions': {'counter_dim': 'hello_counter'},
                    'metric': '{0}_calls'.format(qcallme_with_dims),
                })
        self.assertEqual(pyf.dump_metrics(), {
            '{0}_calls'.format(qcallme): {'count': 1},
            'counter_dim=hello_counter.{0}_calls'.format(
                qcallme_with_dims): {'count': 1},
        })

    def test_histogram(self):
        reg = pyf.MetricsRegistry()
        h1 = reg.histogram('test_histogram')
        h1.add(1)
        h1.add(1)
        h1.add(1)
        h2 = reg.histogram('test_histogram_with_dim',
                           histogram_dim='hello_histogram')
        h2.add(1)
        h2.add(1)
        h2.add(1)
        metrics = reg.dump_metrics()
        self.assertEqual(metrics, {
            'test_histogram': {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
            'histogram_dim=hello_histogram.test_histogram_with_dim': {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
        })
        reg.clear()
        self.assertEqual(reg.dump_metrics(), {})
        self.assertEqual(len(reg.metadata._metadata), 0)

    def test_global_histogram(self):
        h1 = pyf.histogram('test_histogram')
        h1.add(1)
        h1.add(1)
        h1.add(1)
        h2 = pyf.histogram('test_histogram_with_dim',
                           histogram_dim='hello_histogram')
        h2.add(1)
        h2.add(1)
        h2.add(1)
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'histogram_dim=hello_histogram.test_histogram_with_dim'),
            {
                'dimensions': {'histogram_dim': 'hello_histogram'},
                'metric': 'test_histogram_with_dim',
            })
        self.assertEqual(pyf.dump_metrics(), {
            'test_histogram': {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
            'histogram_dim=hello_histogram.test_histogram_with_dim': {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
        })

    def test_histogram_decorator(self):
        @pyf.hist_calls
        def callme():
            return 1
        qcallme = get_qualname(callme)

        @pyf.hist_calls_with_dims(histogram_dim='hello_histogram')
        def callme_with_dims():
            return 1
        qcallme_with_dims = get_qualname(callme_with_dims)

        callme()
        callme()
        callme()
        callme_with_dims()
        callme_with_dims()
        callme_with_dims()
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'histogram_dim=hello_histogram.{0}_calls'.format(
                    qcallme_with_dims)),
            {
                'dimensions': {'histogram_dim': 'hello_histogram'},
                'metric': '{0}_calls'.format(qcallme_with_dims),
            })
        self.assertEqual(pyf.dump_metrics(), {
            '{0}_calls'.format(qcallme): {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
            'histogram_dim=hello_histogram.{0}_calls'.format(
                qcallme_with_dims): {
                'count': 3, '999_percentile': 1, '99_percentile': 1,
                'min': 1, '95_percentile': 1, '75_percentile': 1,
                'std_dev': 0.0, 'max': 1, 'avg': 1.0},
        })

    def test_meter(self):
        reg = pyf.MetricsRegistry()
        reg.meter('test_meter')
        reg.meter('test_meter_with_dim', meter_dim='hello_meter')
        self.assertEqual(
            reg.metadata.get_metadata(
                'meter_dim=hello_meter.test_meter_with_dim'),
            {
                'dimensions': {'meter_dim': 'hello_meter'},
                'metric': 'test_meter_with_dim',
            })
        self.assertEqual(len(reg.dump_metrics()), 2)
        reg.clear()
        self.assertEqual(reg.dump_metrics(), {})
        self.assertEqual(len(reg.metadata._metadata), 0)

    def test_global_meter(self):
        pyf.meter('test_meter')
        pyf.meter('test_meter_with_dim', meter_dim='hello_meter')
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'meter_dim=hello_meter.test_meter_with_dim'),
            {
                'dimensions': {'meter_dim': 'hello_meter'},
                'metric': 'test_meter_with_dim',
            })
        self.assertEqual(len(pyf.dump_metrics()), 2)

    def test_meter_decorator(self):
        @pyf.meter_calls
        def callme():
            return 1

        @pyf.meter_calls_with_dims(meter_dim='hello_meter')
        def callme_with_dims():
            return 1
        qcallme_with_dims = get_qualname(callme_with_dims)

        callme()
        callme()
        callme()
        callme_with_dims()
        callme_with_dims()
        callme_with_dims()
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'meter_dim=hello_meter.{0}_calls'.format(qcallme_with_dims)),
            {
                'dimensions': {'meter_dim': 'hello_meter'},
                'metric': '{0}_calls'.format(qcallme_with_dims),
            })
        self.assertEqual(len(pyf.dump_metrics()), 2)

    def test_timer(self):
        reg = pyf.MetricsRegistry()
        reg.timer('test_timer')
        reg.timer('test_timer_with_dim', timer_dim='hello_timer')
        self.assertEqual(
            reg.metadata.get_metadata(
                'timer_dim=hello_timer.test_timer_with_dim'),
            {
                'dimensions': {'timer_dim': 'hello_timer'},
                'metric': 'test_timer_with_dim',
            })
        self.assertEqual(len(reg.dump_metrics()), 2)
        reg.clear()
        self.assertEqual(reg.dump_metrics(), {})
        self.assertEqual(len(reg.metadata._metadata), 0)

    def test_global_timer(self):
        pyf.timer('test_timer')
        pyf.timer('test_timer_with_dim', timer_dim='hello_timer')
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'timer_dim=hello_timer.test_timer_with_dim'),
            {
                'dimensions': {'timer_dim': 'hello_timer'},
                'metric': 'test_timer_with_dim',
            })
        self.assertEqual(len(pyf.dump_metrics()), 2)

    def test_timer_decorator(self):
        @pyf.time_calls
        def callme():
            return 1

        @pyf.time_calls_with_dims(timer_dim='hello_timer')
        def callme_with_dims():
            return 1
        qcallme_with_dims = get_qualname(callme_with_dims)

        callme()
        callme()
        callme()
        callme_with_dims()
        callme_with_dims()
        callme_with_dims()
        self.assertEqual(
            pyf.global_registry().metadata.get_metadata(
                'timer_dim=hello_timer.{0}_calls'.format(qcallme_with_dims)),
            {
                'dimensions': {'timer_dim': 'hello_timer'},
                'metric': '{0}_calls'.format(qcallme_with_dims),
            })
        self.assertEqual(len(pyf.dump_metrics()), 2)


if __name__ == '__main__':
    unittest.main()
34.991098
77
0.545794
1,273
11,792
4.718775
0.073841
0.063925
0.041951
0.043949
0.841685
0.805727
0.766939
0.750458
0.737473
0.737473
0
0.02511
0.324542
11,792
336
78
35.095238
0.729065
0.010092
0
0.713311
0
0
0.229326
0.088954
0
0
0
0
0.12628
1
0.078498
false
0.006826
0.017065
0.020478
0.119454
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
86d6f20744ccd21888ff3f18391753887eb5eb6e
4,123
py
Python
examples/jaqal/single_qubit_gst.py
haikusw/jaqalpaq
d507e894cb897756a1e51c99582b736254995b4e
[ "Apache-2.0" ]
8
2021-02-19T23:25:28.000Z
2021-09-24T20:11:13.000Z
examples/jaqal/single_qubit_gst.py
haikusw/jaqalpaq
d507e894cb897756a1e51c99582b736254995b4e
[ "Apache-2.0" ]
null
null
null
examples/jaqal/single_qubit_gst.py
haikusw/jaqalpaq
d507e894cb897756a1e51c99582b736254995b4e
[ "Apache-2.0" ]
null
null
null
( "circuit", ("register", "q", 1), ("macro", "F0", "qubit", ("sequential_block",)), ("macro", "F1", "qubit", ("sequential_block", ("gate", "Sx", "qubit"))), ("macro", "F2", "qubit", ("sequential_block", ("gate", "Sy", "qubit"))), ( "macro", "F3", "qubit", ("sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sx", "qubit")), ), ( "macro", "F4", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sx", "qubit"), ("gate", "Sx", "qubit"), ), ), ( "macro", "F5", "qubit", ( "sequential_block", ("gate", "Sy", "qubit"), ("gate", "Sy", "qubit"), ("gate", "Sy", "qubit"), ), ), ("macro", "G0", "qubit", ("sequential_block", ("gate", "Sx", "qubit"))), ("macro", "G1", "qubit", ("sequential_block", ("gate", "Sy", "qubit"))), ("macro", "G2", "qubit", ("sequential_block", ("gate", "I_Sx", "qubit"))), ( "macro", "G3", "qubit", ("sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sy", "qubit")), ), ( "macro", "G4", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sy", "qubit"), ("gate", "I_Sx", "qubit"), ), ), ( "macro", "G5", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "I_Sx", "qubit"), ("gate", "Sy", "qubit"), ), ), ( "macro", "G6", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "I_Sx", "qubit"), ("gate", "I_Sx", "qubit"), ), ), ( "macro", "G7", "qubit", ( "sequential_block", ("gate", "Sy", "qubit"), ("gate", "I_Sx", "qubit"), ("gate", "I_Sx", "qubit"), ), ), ( "macro", "G8", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sx", "qubit"), ("gate", "I_Sx", "qubit"), ("gate", "Sy", "qubit"), ), ), ( "macro", "G9", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sy", "qubit"), ("gate", "Sy", "qubit"), ("gate", "I_Sx", "qubit"), ), ), ( "macro", "G10", "qubit", ( "sequential_block", ("gate", "Sx", "qubit"), ("gate", "Sx", "qubit"), ("gate", "Sy", "qubit"), ("gate", "Sx", "qubit"), ("gate", "Sy", "qubit"), ("gate", "Sy", "qubit"), ), ), ("gate", "prepare_all"), ("gate", "F0", 
("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F1", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F2", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F3", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F4", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F5", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F1", ("array_item", "q", 0)), ("gate", "F1", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F1", ("array_item", "q", 0)), ("gate", "F2", ("array_item", "q", 0)), ("gate", "measure_all"), ("gate", "prepare_all"), ("gate", "F1", ("array_item", "q", 0)), ("loop", 8, ("sequential_block", ("gate", "G1", ("array_item", "q", 0)))), ("gate", "F1", ("array_item", "q", 0)), ("gate", "measure_all"), )
26.094937
79
0.360902
357
4,123
4.005602
0.103641
0.127273
0.237762
0.268531
0.895105
0.87972
0.876224
0.708392
0.703497
0.626573
0
0.017544
0.364055
4,123
157
80
26.261147
0.527841
0
0
0.694268
0
0
0.357264
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
9
86f6aa7336abb75cd61c8b8fbecbbeee2a5dc4ba
3,708
py
Python
tests/test_process_collector.py
vmarkovtsev/client_python
dd93abe3b1d20bf8ac0ea07080e1c961dc8e44bd
[ "Apache-2.0" ]
2,729
2015-02-12T13:13:24.000Z
2022-03-30T10:33:12.000Z
tests/test_process_collector.py
vmarkovtsev/client_python
dd93abe3b1d20bf8ac0ea07080e1c961dc8e44bd
[ "Apache-2.0" ]
668
2015-02-10T22:57:50.000Z
2022-03-30T06:25:49.000Z
tests/test_process_collector.py
vmarkovtsev/client_python
dd93abe3b1d20bf8ac0ea07080e1c961dc8e44bd
[ "Apache-2.0" ]
767
2015-02-10T22:51:46.000Z
2022-03-26T01:11:58.000Z
from __future__ import unicode_literals import os import unittest from prometheus_client import CollectorRegistry, ProcessCollector class TestProcessCollector(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.test_proc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proc') def test_working(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry) collector._ticks = 100 collector._pagesize = 4096 self.assertEqual(17.21, self.registry.get_sample_value('process_cpu_seconds_total')) self.assertEqual(56274944.0, self.registry.get_sample_value('process_virtual_memory_bytes')) self.assertEqual(8114176, self.registry.get_sample_value('process_resident_memory_bytes')) self.assertEqual(1418184099.75, self.registry.get_sample_value('process_start_time_seconds')) self.assertEqual(2048.0, self.registry.get_sample_value('process_max_fds')) self.assertEqual(5.0, self.registry.get_sample_value('process_open_fds')) self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace')) def test_namespace(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry, namespace='n') collector._ticks = 100 collector._pagesize = 4096 self.assertEqual(17.21, self.registry.get_sample_value('n_process_cpu_seconds_total')) self.assertEqual(56274944.0, self.registry.get_sample_value('n_process_virtual_memory_bytes')) self.assertEqual(8114176, self.registry.get_sample_value('n_process_resident_memory_bytes')) self.assertEqual(1418184099.75, self.registry.get_sample_value('n_process_start_time_seconds')) self.assertEqual(2048.0, self.registry.get_sample_value('n_process_max_fds')) self.assertEqual(5.0, self.registry.get_sample_value('n_process_open_fds')) self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total')) def test_working_584(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: "584\n", registry=self.registry) 
collector._ticks = 100 collector._pagesize = 4096 self.assertEqual(0.0, self.registry.get_sample_value('process_cpu_seconds_total')) self.assertEqual(10395648.0, self.registry.get_sample_value('process_virtual_memory_bytes')) self.assertEqual(634880, self.registry.get_sample_value('process_resident_memory_bytes')) self.assertEqual(1418291667.75, self.registry.get_sample_value('process_start_time_seconds')) self.assertEqual(None, self.registry.get_sample_value('process_max_fds')) self.assertEqual(None, self.registry.get_sample_value('process_open_fds')) def test_working_fake_pid(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 123, registry=self.registry) collector._ticks = 100 collector._pagesize = 4096 self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total')) self.assertEqual(None, self.registry.get_sample_value('process_virtual_memory_bytes')) self.assertEqual(None, self.registry.get_sample_value('process_resident_memory_bytes')) self.assertEqual(None, self.registry.get_sample_value('process_start_time_seconds')) self.assertEqual(None, self.registry.get_sample_value('process_max_fds')) self.assertEqual(None, self.registry.get_sample_value('process_open_fds')) self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace')) if __name__ == '__main__': unittest.main()
54.529412
115
0.755933
471
3,708
5.592357
0.152866
0.145786
0.153759
0.215262
0.854594
0.854594
0.854594
0.853075
0.851936
0.809415
0
0.048095
0.136462
3,708
67
116
55.343284
0.774516
0
0
0.301887
0
0
0.176645
0.137271
0
0
0
0
0.509434
1
0.09434
false
0
0.075472
0
0.188679
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
86ff1581adfdbc6ba1c6c689e6a0d1e115b08d15
161
py
Python
Lib/Scripts/glyphs/actions/copy & paste.py
gferreira/hTools2
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
[ "BSD-3-Clause" ]
11
2015-01-06T15:43:56.000Z
2019-07-27T00:35:20.000Z
Lib/Scripts/glyphs/actions/copy & paste.py
gferreira/hTools2
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
[ "BSD-3-Clause" ]
2
2017-05-17T10:11:46.000Z
2018-11-21T21:43:43.000Z
Lib/Scripts/glyphs/actions/copy & paste.py
gferreira/hTools2
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
[ "BSD-3-Clause" ]
4
2015-01-10T13:58:50.000Z
2019-12-18T15:40:14.000Z
# [h] copy / paste import hTools2.dialogs.glyphs.copy_paste reload(hTools2.dialogs.glyphs.copy_paste) hTools2.dialogs.glyphs.copy_paste.copyPasteGlyphDialog()
23
56
0.819876
21
161
6.142857
0.428571
0.27907
0.465116
0.55814
0.674419
0
0
0
0
0
0
0.02
0.068323
161
6
57
26.833333
0.84
0.099379
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
810d025d92882a4f6972fa75975ddd07cb8d6801
2,460
py
Python
beer-song/beer_song_test.py
mambocab/xpython
be4aacc18aafee449fa2ce0f515ee03b0c8ae4d9
[ "MIT" ]
null
null
null
beer-song/beer_song_test.py
mambocab/xpython
be4aacc18aafee449fa2ce0f515ee03b0c8ae4d9
[ "MIT" ]
null
null
null
beer-song/beer_song_test.py
mambocab/xpython
be4aacc18aafee449fa2ce0f515ee03b0c8ae4d9
[ "MIT" ]
1
2020-06-10T23:33:20.000Z
2020-06-10T23:33:20.000Z
import unittest from beer import song, verse class BeerTest(unittest.TestCase): def test_a_verse(self): self.assertEqual( verse(8), "8 bottles of beer on the wall, 8 bottles of beer.\n" "Take one down and pass it around, 7 bottles of beer on the wall.\n" ) def test_verse_1(self): self.assertEqual( verse(1), "1 bottle of beer on the wall, 1 bottle of beer.\n" "Take it down and pass it around, no more bottles of beer on the wall.\n" ) def test_verse_2(self): self.assertEqual( verse(2), "2 bottles of beer on the wall, 2 bottles of beer.\n" "Take one down and pass it around, 1 bottle of beer on the wall.\n" ) def test_verse_0(self): self.assertEqual( verse(0), "No more bottles of beer on the wall, no more bottles of beer.\n" "Go to the store and buy some more, 99 bottles of beer on the wall.\n" ) def test_songing_several_verses(self): self.assertEqual( song(8, 6), "8 bottles of beer on the wall, 8 bottles of beer.\n" "Take one down and pass it around, 7 bottles of beer on the wall.\n" "\n" "7 bottles of beer on the wall, 7 bottles of beer.\n" "Take one down and pass it around, 6 bottles of beer on the wall.\n" "\n" "6 bottles of beer on the wall, 6 bottles of beer.\n" "Take one down and pass it around, 5 bottles of beer on the wall.\n" "\n" ) def test_song_all_the_rest_of_the_verses(self): self.assertEqual( song(3), "3 bottles of beer on the wall, 3 bottles of beer.\n" "Take one down and pass it around, 2 bottles of beer on the wall.\n" "\n" "2 bottles of beer on the wall, 2 bottles of beer.\n" "Take one down and pass it around, 1 bottle of beer on the wall.\n" "\n" "1 bottle of beer on the wall, 1 bottle of beer.\n" "Take it down and pass it around, no more bottles of beer on the wall.\n" "\n" "No more bottles of beer on the wall, no more bottles of beer.\n" "Go to the store and buy some more, 99 bottles of beer on the wall.\n" "\n" ) if __name__ == '__main__': unittest.main()
36.176471
85
0.55813
389
2,460
3.462725
0.133676
0.146993
0.260579
0.179659
0.812918
0.769859
0.753526
0.717149
0.66147
0.647365
0
0.024809
0.360976
2,460
67
86
36.716418
0.832061
0
0
0.5
0
0
0.545122
0
0
0
0
0
0.103448
1
0.103448
false
0.155172
0.034483
0
0.155172
0
0
0
0
null
0
1
1
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
d4b8cad13f3f663e5b8fa574dc0bfdd109f5614f
136
py
Python
scripts/create_db.py
abrookins/siren
8e85d35e01e804ce962ea3ffe88885270b3bd573
[ "MIT" ]
2
2015-01-12T10:04:29.000Z
2018-07-09T16:56:27.000Z
scripts/create_db.py
abrookins/siren
8e85d35e01e804ce962ea3ffe88885270b3bd573
[ "MIT" ]
null
null
null
scripts/create_db.py
abrookins/siren
8e85d35e01e804ce962ea3ffe88885270b3bd573
[ "MIT" ]
null
null
null
import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import util util.make_crimes_db()
15.111111
77
0.779412
23
136
4.347826
0.521739
0.18
0.26
0.3
0.32
0
0
0
0
0
0
0
0.080882
136
8
78
17
0.8
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
d4bf4c78ad21229345e1f693919c458914f989b7
107
py
Python
config.py
e-io/anticovirus
893c3746e2e3471b75ff5e8f2fcdcddbfcdd834e
[ "Apache-2.0" ]
1
2020-05-18T17:26:04.000Z
2020-05-18T17:26:04.000Z
config.py
e-io/anticovirus
893c3746e2e3471b75ff5e8f2fcdcddbfcdd834e
[ "Apache-2.0" ]
null
null
null
config.py
e-io/anticovirus
893c3746e2e3471b75ff5e8f2fcdcddbfcdd834e
[ "Apache-2.0" ]
null
null
null
vk_token="7fc711c235c1904d191b6a80b7c440b66dc90e76919d7e006cad3922b599016a32226736c5bbcf91555c5" lang="ru"
35.666667
96
0.925234
5
107
19.6
1
0
0
0
0
0
0
0
0
0
0
0.561905
0.018692
107
2
97
53.5
0.371429
0
0
0
0
0
0.813084
0.794393
0
1
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
8
d4c0293a1d5cce797deb06ef44cce236502d021a
5,958
py
Python
tests/python/test_sparse_matrix.py
josephgalestian/taichiV2-master
12a63a05fdccc824205b1ee6545e4706bf473405
[ "MIT" ]
null
null
null
tests/python/test_sparse_matrix.py
josephgalestian/taichiV2-master
12a63a05fdccc824205b1ee6545e4706bf473405
[ "MIT" ]
null
null
null
tests/python/test_sparse_matrix.py
josephgalestian/taichiV2-master
12a63a05fdccc824205b1ee6545e4706bf473405
[ "MIT" ]
null
null
null
# Taichi sparse-matrix tests (CPU backend).
#
# NOTE(review): reconstructed from a whitespace-mangled source (the file
# had been collapsed onto single physical lines and no longer parsed).

import taichi as ti
from tests import test_utils


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_builder():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j

    fill(Abuilder)
    A = Abuilder.build()
    for i in range(n):
        for j in range(n):
            assert A[i, j] == i + j


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_shape():
    n, m = 8, 9
    Abuilder = ti.linalg.SparseMatrixBuilder(n, m, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, m):
            Abuilder[i, j] += i + j

    fill(Abuilder)
    A = Abuilder.build()
    assert A.shape() == (n, m)


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_element_access():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i in range(n):
            Abuilder[i, i] += i

    fill(Abuilder)
    A = Abuilder.build()
    for i in range(n):
        assert A[i, i] == i


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_element_modify():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i in range(n):
            Abuilder[i, i] += i

    fill(Abuilder)
    A = Abuilder.build()
    A[0, 0] = 1024.0
    assert A[0, 0] == 1024.0


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_addition():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
    Bbuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder(),
             Bbuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j
            Bbuilder[i, j] += i - j

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    C = A + B
    # (i + j) + (i - j) == 2 * i
    for i in range(n):
        for j in range(n):
            assert C[i, j] == 2 * i


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_subtraction():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
    Bbuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder(),
             Bbuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j
            Bbuilder[i, j] += i - j

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    C = A - B
    # (i + j) - (i - j) == 2 * j
    for i in range(n):
        for j in range(n):
            assert C[i, j] == 2 * j


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_scalar_multiplication():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j

    fill(Abuilder)
    A = Abuilder.build()
    B = A * 3.0
    for i in range(n):
        for j in range(n):
            assert B[i, j] == 3 * (i + j)


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_transpose():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j

    fill(Abuilder)
    A = Abuilder.build()
    B = A.transpose()
    for i in range(n):
        for j in range(n):
            assert B[i, j] == A[j, i]


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_elementwise_multiplication():
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
    Bbuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder(),
             Bbuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j
            Bbuilder[i, j] += i - j

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    # `*` is element-wise; `@` (below) is matrix multiplication.
    C = A * B
    for i in range(n):
        for j in range(n):
            assert C[i, j] == (i + j) * (i - j)


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_multiplication():
    n = 2
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
    Bbuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder(),
             Bbuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += i + j
            Bbuilder[i, j] += i - j

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    C = A @ B
    assert C[0, 0] == 1.0
    assert C[0, 1] == 0.0
    assert C[1, 0] == 2.0
    assert C[1, 1] == -1.0


@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_nonsymmetric_multiplication():
    n, k, m = 2, 3, 4
    Abuilder = ti.linalg.SparseMatrixBuilder(n, k, max_num_triplets=100)
    Bbuilder = ti.linalg.SparseMatrixBuilder(k, m, max_num_triplets=100)

    @ti.kernel
    def fill(Abuilder: ti.linalg.sparse_matrix_builder(),
             Bbuilder: ti.linalg.sparse_matrix_builder()):
        for i, j in ti.ndrange(n, k):
            Abuilder[i, j] += i + j
        for i, j in ti.ndrange(k, m):
            Bbuilder[i, j] -= i + j

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    C = A @ B
    GT = [[-5, -8, -11, -14], [-8, -14, -20, -26]]
    for i in range(n):
        for j in range(m):
            assert C[i, j] == GT[i][j]
27.583333
72
0.597684
904
5,958
3.813053
0.06969
0.029011
0.102118
0.019727
0.920511
0.89179
0.884537
0.884537
0.869452
0.862779
0
0.025086
0.264015
5,958
215
73
27.711628
0.761003
0
0
0.736842
0
0
0
0
0
0
0
0
0.081871
1
0.128655
false
0
0.011696
0
0.140351
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d4d3eb3ecdc6bc63b9fb323f4f6cbb2ee4137d98
45,387
py
Python
odoo-13.0/addons/sale_stock/tests/test_anglo_saxon_valuation.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
12
2021-03-26T08:39:40.000Z
2022-03-16T02:20:10.000Z
odoo-13.0/addons/sale_stock/tests/test_anglo_saxon_valuation.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
13
2020-12-20T16:00:21.000Z
2022-03-14T14:55:30.000Z
odoo-13.0/addons/sale_stock/tests/test_anglo_saxon_valuation.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
17
2020-08-31T11:18:49.000Z
2022-02-09T05:57:31.000Z
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo.tests import Form from odoo.tests.common import SavepointCase from odoo.exceptions import UserError class TestAngloSaxonValuation(SavepointCase): @classmethod def setUpClass(cls): super(TestAngloSaxonValuation, cls).setUpClass() cls.env.user.company_id.anglo_saxon_accounting = True cls.product = cls.env['product.product'].create({ 'name': 'product', 'type': 'product', 'categ_id': cls.env.ref('product.product_category_all').id, }) cls.stock_input_account = cls.env['account.account'].create({ 'name': 'Stock Input', 'code': 'StockIn', 'user_type_id': cls.env.ref('account.data_account_type_current_assets').id, }) cls.stock_output_account = cls.env['account.account'].create({ 'name': 'Stock Output', 'code': 'StockOut', 'reconcile': True, 'user_type_id': cls.env.ref('account.data_account_type_current_assets').id, }) cls.stock_valuation_account = cls.env['account.account'].create({ 'name': 'Stock Valuation', 'code': 'StockVal', 'user_type_id': cls.env.ref('account.data_account_type_current_assets').id, }) cls.expense_account = cls.env['account.account'].create({ 'name': 'Expense Account', 'code': 'Exp', 'user_type_id': cls.env.ref('account.data_account_type_expenses').id, }) cls.income_account = cls.env['account.account'].create({ 'name': 'Income Account', 'code': 'Inc', 'user_type_id': cls.env.ref('account.data_account_type_expenses').id, }) cls.stock_journal = cls.env['account.journal'].create({ 'name': 'Stock Journal', 'code': 'STJTEST', 'type': 'general', }) cls.product.write({ 'property_account_expense_id': cls.expense_account.id, 'property_account_income_id': cls.income_account.id, }) cls.product.categ_id.write({ 'property_stock_account_input_categ_id': cls.stock_input_account.id, 'property_stock_account_output_categ_id': cls.stock_output_account.id, 'property_stock_valuation_account_id': cls.stock_valuation_account.id, 'property_stock_journal': 
cls.stock_journal.id, 'property_valuation': 'real_time', }) cls.stock_location = cls.env['stock.warehouse'].search([], limit=1).lot_stock_id cls.recv_account = cls.env['account.account'].create({ 'name': 'account receivable', 'code': 'RECV', 'user_type_id': cls.env.ref('account.data_account_type_receivable').id, 'reconcile': True, }) cls.pay_account = cls.env['account.account'].create({ 'name': 'account payable', 'code': 'PAY', 'user_type_id': cls.env.ref('account.data_account_type_payable').id, 'reconcile': True, }) cls.customer = cls.env['res.partner'].create({ 'name': 'customer', 'property_account_receivable_id': cls.recv_account.id, 'property_account_payable_id': cls.pay_account.id, }) cls.journal_sale = cls.env['account.journal'].create({ 'name': 'Sale Journal - Test', 'code': 'AJ-SALE', 'type': 'sale', 'company_id': cls.env.user.company_id.id, }) cls.counterpart_account = cls.env['account.account'].create({ 'name': 'Counterpart account', 'code': 'Count', 'user_type_id': cls.env.ref('account.data_account_type_expenses').id, }) def _inv_adj_two_units(self): inventory = self.env['stock.inventory'].create({ 'name': 'test', 'location_ids': [(4, self.stock_location.id)], 'product_ids': [(4, self.product.id)], }) inventory.action_start() self.env['stock.inventory.line'].create({ 'inventory_id': inventory.id, 'location_id': self.stock_location.id, 'product_id': self.product.id, 'product_qty': 2, }) inventory.action_validate() def _so_and_confirm_two_units(self): sale_order = self.env['sale.order'].create({ 'partner_id': self.customer.id, 'order_line': [ (0, 0, { 'name': self.product.name, 'product_id': self.product.id, 'product_uom_qty': 2.0, 'product_uom': self.product.uom_id.id, 'price_unit': 12, 'tax_id': False, # no love taxes amls })], }) sale_order.action_confirm() return sale_order def _fifo_in_one_eight_one_ten(self): # Put two items in stock. 
in_move_1 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 1, 'price_unit': 8, }) in_move_1._action_confirm() in_move_1.quantity_done = 1 in_move_1._action_done() in_move_2 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 1, 'price_unit': 10, }) in_move_2._action_confirm() in_move_2.quantity_done = 1 in_move_2._action_done() # ------------------------------------------------------------------------- # Standard Ordered # ------------------------------------------------------------------------- def test_standard_ordered_invoice_pre_delivery(self): """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Standard price set to 14. Invoice 2 without delivering. The amount in Stock OUT and COGS should be 14*2. """ self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'order' self.product._change_standard_price(10.0, counterpart_account_id=self.counterpart_account.id) # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # standard price to 14 self.product._change_standard_price(14.0, counterpart_account_id=self.counterpart_account.id) # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 28) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 28) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_standard_ordered_invoice_post_partial_delivery_1(self): """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, invoice 1, change the standard price to 14, deliver one, change the standard price to 16, invoice 1. The amounts used in Stock OUT and COGS should be 10 then 14.""" self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'order' self.product._change_standard_price(10.0, counterpart_account_id=self.counterpart_account.id) # Put two items in stock. sale_order = self._so_and_confirm_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. 
sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # Invoice 1 invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice_form = Form(invoice) with invoice_form.invoice_line_ids.edit(0) as invoice_line: invoice_line.quantity = 1 invoice_form.save() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 10) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 10) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 12) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 12) # change the standard price to 14 self.product._change_standard_price(14.0, counterpart_account_id=self.counterpart_account.id) # deliver the backorder sale_order.picking_ids[0].move_lines.quantity_done = 1 sale_order.picking_ids[0].button_validate() # change the standard price to 16 self.product._change_standard_price(16.0, counterpart_account_id=self.counterpart_account.id) # invoice 1 invoice2 = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice2.post() amls = invoice2.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 14) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) 
self.assertEqual(cogs_aml.debit, 14) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 12) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 12) def test_standard_ordered_invoice_post_delivery(self): """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, change the standard price to 14, deliver one, invoice 2. The amounts used in Stock OUT and COGS should be 12*2.""" self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'order' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # change the standard price to 14 self.product._change_standard_price(14.0, counterpart_account_id=self.counterpart_account.id) # deliver the backorder sale_order.picking_ids.filtered('backorder_id').move_lines.quantity_done = 1 sale_order.picking_ids.filtered('backorder_id').button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 24) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 24) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) # ------------------------------------------------------------------------- # Standard Delivered # ------------------------------------------------------------------------- def test_standard_delivered_invoice_pre_delivery(self): """Not possible to invoice pre delivery.""" self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Invoice the sale order. # Nothing delivered = nothing to invoice. with self.assertRaises(UserError): sale_order._create_invoices() def test_standard_delivered_invoice_post_partial_delivery(self): """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, invoice 1, change the standard price to 14, deliver one, change the standard price to 16, invoice 1. The amounts used in Stock OUT and COGS should be 10 then 14.""" self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. 
sale_order = self._so_and_confirm_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # Invoice 1 invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice_form = Form(invoice) with invoice_form.invoice_line_ids.edit(0) as invoice_line: invoice_line.quantity = 1 invoice_form.save() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 10) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 10) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 12) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 12) # change the standard price to 14 self.product._change_standard_price(14.0, counterpart_account_id=self.counterpart_account.id) # deliver the backorder sale_order.picking_ids[0].move_lines.quantity_done = 1 sale_order.picking_ids[0].button_validate() # change the standard price to 16 self.product._change_standard_price(16.0, counterpart_account_id=self.counterpart_account.id) # invoice 1 invoice2 = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice2.post() amls = invoice2.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) 
self.assertEqual(stock_out_aml.credit, 14) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 14) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 12) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 12) def test_standard_delivered_invoice_post_delivery(self): """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, change the standard price to 14, deliver one, invoice 2. The amounts used in Stock OUT and COGS should be 12*2.""" self.product.categ_id.property_cost_method = 'standard' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # change the standard price to 14 self.product._change_standard_price(14.0, counterpart_account_id=self.counterpart_account.id) # deliver the backorder sale_order.picking_ids.filtered('backorder_id').move_lines.quantity_done = 1 sale_order.picking_ids.filtered('backorder_id').button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 24) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 24) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) # ------------------------------------------------------------------------- # AVCO Ordered # ------------------------------------------------------------------------- def test_avco_ordered_invoice_pre_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice without delivering.""" self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'order' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_avco_ordered_invoice_post_partial_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice after delivering 1.""" self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'order' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_avco_ordered_invoice_post_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice after full delivery.""" self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'order' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 2 sale_order.picking_ids.button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) # ------------------------------------------------------------------------- # AVCO Delivered # ------------------------------------------------------------------------- def test_avco_delivered_invoice_pre_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice without delivering. """ self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Invoice the sale order. # Nothing delivered = nothing to invoice. with self.assertRaises(UserError): sale_order._create_invoices() def test_avco_delivered_invoice_post_partial_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice after delivering 1.""" self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. 
sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # Invoice the sale order. invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 10) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 10) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 12) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 12) def test_avco_delivered_invoice_post_delivery(self): """Standard price set to 10. Sale order 2@12. Invoice after full delivery.""" self.product.categ_id.property_cost_method = 'average' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 # Put two items in stock. self._inv_adj_two_units() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 2 sale_order.picking_ids.button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) # ------------------------------------------------------------------------- # FIFO Ordered # ------------------------------------------------------------------------- def test_fifo_ordered_invoice_pre_delivery(self): """Receive at 8 then at 10. Sale order 2@12. Invoice without delivering. As no standard price is set, the Stock OUT and COGS amounts are 0.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'order' self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 0) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 0) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_fifo_ordered_invoice_post_partial_delivery(self): """Receive 1@8, 1@10, so 2@12, standard price 12, deliver 1, invoice 2: the COGS amount should be 20: 1 really delivered at 10 and the other valued at the standard price 10.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'order' self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. 
sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # upate the standard price to 12 self.product.standard_price = 12 # Invoice 2 invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice_form = Form(invoice) with invoice_form.invoice_line_ids.edit(0) as invoice_line: invoice_line.quantity = 2 invoice_form.save() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_fifo_ordered_invoice_post_delivery(self): """Receive at 8 then at 10. Sale order 2@12. Invoice after delivering everything.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'order' self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 2 sale_order.picking_ids.button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 18) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 18) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) # ------------------------------------------------------------------------- # FIFO Delivered # ------------------------------------------------------------------------- def test_fifo_delivered_invoice_pre_delivery(self): self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Invoice the sale order. # Nothing delivered = nothing to invoice. with self.assertRaises(UserError): invoice_id = sale_order._create_invoices() def test_fifo_delivered_invoice_post_partial_delivery(self): """Receive 1@8, 1@10, so 2@12, standard price 12, deliver 1, invoice 2: the price used should be 10: one at 8 and one at 10.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'delivery' self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. 
sale_order.picking_ids.move_lines.quantity_done = 1 wiz = sale_order.picking_ids.button_validate() wiz = self.env[wiz['res_model']].browse(wiz['res_id']) wiz.process() # upate the standard price to 12 self.product.standard_price = 12 # Invoice 2 invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice_form = Form(invoice) with invoice_form.invoice_line_ids.edit(0) as invoice_line: invoice_line.quantity = 2 invoice_form.save() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 20) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 20) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_fifo_delivered_invoice_post_delivery(self): """Receive at 8 then at 10. Sale order 2@12. Invoice after delivering everything.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 self._fifo_in_one_eight_one_ten() # Create and confirm a sale order for 2@12 sale_order = self._so_and_confirm_two_units() # Deliver one. sale_order.picking_ids.move_lines.quantity_done = 2 sale_order.picking_ids.button_validate() # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 18) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 18) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 24) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 24) def test_fifo_delivered_invoice_post_delivery_2(self): """Receive at 8 then at 10. Sale order 10@12 and deliver without receiving the 2 missing. receive 2@12. 
Invoice.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'delivery' self.product.standard_price = 10 in_move_1 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 8, 'price_unit': 10, }) in_move_1._action_confirm() in_move_1.quantity_done = 8 in_move_1._action_done() # Create and confirm a sale order for 2@12 sale_order = self.env['sale.order'].create({ 'partner_id': self.customer.id, 'order_line': [ (0, 0, { 'name': self.product.name, 'product_id': self.product.id, 'product_uom_qty': 10.0, 'product_uom': self.product.uom_id.id, 'price_unit': 12, 'tax_id': False, # no love taxes amls })], }) sale_order.action_confirm() # Deliver 10 sale_order.picking_ids.move_lines.quantity_done = 10 sale_order.picking_ids.button_validate() # Make the second receipt in_move_2 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 2, 'price_unit': 12, }) in_move_2._action_confirm() in_move_2.quantity_done = 2 in_move_2._action_done() self.assertEqual(self.product.stock_valuation_layer_ids[-1].value, -4) # we sent two at 10 but they should have been sent at 12 self.assertEqual(self.product.stock_valuation_layer_ids[-1].quantity, 0) self.assertEqual(sale_order.order_line.move_ids.stock_valuation_layer_ids[-1].quantity, 0) # Invoice the sale order. 
invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # Check the resulting accounting entries amls = invoice.line_ids self.assertEqual(len(amls), 4) stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.stock_output_account) self.assertEqual(stock_out_aml.debit, 0) self.assertEqual(stock_out_aml.credit, 104) cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 104) self.assertEqual(cogs_aml.credit, 0) receivable_aml = amls.filtered(lambda aml: aml.account_id == self.recv_account) self.assertEqual(receivable_aml.debit, 120) self.assertEqual(receivable_aml.credit, 0) income_aml = amls.filtered(lambda aml: aml.account_id == self.income_account) self.assertEqual(income_aml.debit, 0) self.assertEqual(income_aml.credit, 120) def test_fifo_delivered_invoice_post_delivery_3(self): """Receive 5@8, receive 8@12, sale 1@20, deliver, sale 6@20, deliver. Make sure no rouding issues appear on the second invoice.""" self.product.categ_id.property_cost_method = 'fifo' self.product.invoice_policy = 'delivery' # +5@8 in_move_1 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 5, 'price_unit': 8, }) in_move_1._action_confirm() in_move_1.quantity_done = 5 in_move_1._action_done() # +8@12 in_move_2 = self.env['stock.move'].create({ 'name': 'a', 'product_id': self.product.id, 'location_id': self.env.ref('stock.stock_location_suppliers').id, 'location_dest_id': self.stock_location.id, 'product_uom': self.product.uom_id.id, 'product_uom_qty': 8, 'price_unit': 12, }) in_move_2._action_confirm() in_move_2.quantity_done = 8 in_move_2._action_done() # sale 1@20, deliver, invoice sale_order = self.env['sale.order'].create({ 'partner_id': self.customer.id, 'order_line': [ (0, 0, { 
'name': self.product.name, 'product_id': self.product.id, 'product_uom_qty': 1, 'product_uom': self.product.uom_id.id, 'price_unit': 20, 'tax_id': False, })], }) sale_order.action_confirm() sale_order.picking_ids.move_lines.quantity_done = 1 sale_order.picking_ids.button_validate() invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # sale 6@20, deliver, invoice sale_order = self.env['sale.order'].create({ 'partner_id': self.customer.id, 'order_line': [ (0, 0, { 'name': self.product.name, 'product_id': self.product.id, 'product_uom_qty': 6, 'product_uom': self.product.uom_id.id, 'price_unit': 20, 'tax_id': False, })], }) sale_order.action_confirm() sale_order.picking_ids.move_lines.quantity_done = 6 sale_order.picking_ids.button_validate() invoice = sale_order.with_context(default_journal_id=self.journal_sale.id)._create_invoices() invoice.post() # check the last anglo saxon invoice line amls = invoice.line_ids cogs_aml = amls.filtered(lambda aml: aml.account_id == self.expense_account) self.assertEqual(cogs_aml.debit, 56) self.assertEqual(cogs_aml.credit, 0)
44.893175
136
0.644634
5,810
45,387
4.762823
0.039931
0.090525
0.038523
0.055399
0.923966
0.90597
0.89596
0.888154
0.878072
0.873374
0
0.020098
0.237006
45,387
1,010
137
44.937624
0.778978
0.140921
0
0.831944
0
0
0.073587
0.019173
0
0
0
0
0.236111
1
0.033333
false
0
0.004167
0
0.040278
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d4d55b01e0778ee5f76e2fc56148b185058a54a8
118,842
py
Python
ai2thor/tests/test_event.py
ekolve/ai2thor-lgtm
0a8d5cf961134ee31f5410d4aa2f3f9f750d6911
[ "Apache-2.0" ]
null
null
null
ai2thor/tests/test_event.py
ekolve/ai2thor-lgtm
0a8d5cf961134ee31f5410d4aa2f3f9f750d6911
[ "Apache-2.0" ]
2
2021-04-26T16:29:22.000Z
2021-04-26T16:34:39.000Z
ai2thor/tests/test_event.py
ekolve/ai2thor-lgtm
0a8d5cf961134ee31f5410d4aa2f3f9f750d6911
[ "Apache-2.0" ]
null
null
null
import os from ai2thor.server import Event import numpy as np import pytest from ai2thor.tests.constants import TESTS_DATA_DIR metadata_complex = { "agent": { "bounds3D": [], "cameraHorizon": 0.0, "distance": 0.0, "isopen": False, "name": "agent", "objectId": "", "objectType": "", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": {"x": -0.75, "y": 0.9799995422363281, "z": -0.25}, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 180.0, "z": 0.0}, "visible": False, }, "thirdPartyCameras": [], "agentId": 0, "collided": False, "collidedObjects": [], "colorBounds": [ {"bounds": [0, 0, 119, 299], "color": [138, 235, 7]}, {"bounds": [116, 0, 299, 99], "color": [127, 29, 203]}, {"bounds": [116, 0, 137, 64], "color": [237, 189, 33]}, {"bounds": [131, 0, 143, 55], "color": [97, 134, 44]}, {"bounds": [139, 0, 169, 71], "color": [193, 44, 202]}, {"bounds": [141, 0, 146, 30], "color": [96, 50, 133]}, {"bounds": [133, 0, 299, 85], "color": [89, 77, 61]}, {"bounds": [143, 0, 297, 34], "color": [214, 15, 78]}, {"bounds": [116, 0, 299, 99], "color": [115, 3, 101]}, {"bounds": [258, 12, 299, 84], "color": [96, 140, 59]}, {"bounds": [116, 14, 120, 28], "color": [162, 203, 153]}, {"bounds": [195, 15, 255, 85], "color": [108, 174, 95]}, {"bounds": [172, 17, 194, 71], "color": [168, 12, 250]}, {"bounds": [121, 18, 132, 30], "color": [246, 16, 151]}, {"bounds": [124, 29, 133, 40], "color": [116, 220, 170]}, {"bounds": [117, 31, 125, 63], "color": [115, 78, 181]}, {"bounds": [258, 35, 289, 43], "color": [241, 134, 252]}, {"bounds": [126, 39, 135, 49], "color": [114, 84, 146]}, {"bounds": [119, 44, 299, 203], "color": [73, 64, 168]}, {"bounds": [128, 48, 136, 57], "color": [185, 225, 171]}, {"bounds": [223, 54, 233, 69], "color": [14, 97, 183]}, {"bounds": [135, 56, 138, 74], "color": [96, 48, 36]}, {"bounds": [126, 69, 127, 69], "color": [66, 225, 0]}, {"bounds": [172, 72, 194, 84], "color": 
[191, 227, 85]}, {"bounds": [117, 77, 121, 78], "color": [92, 3, 233]}, {"bounds": [116, 81, 170, 96], "color": [177, 60, 44]}, {"bounds": [284, 91, 299, 123], "color": [110, 132, 248]}, {"bounds": [192, 92, 197, 97], "color": [36, 91, 74]}, {"bounds": [218, 92, 224, 97], "color": [56, 51, 197]}, {"bounds": [118, 93, 133, 101], "color": [72, 78, 219]}, {"bounds": [205, 93, 212, 99], "color": [178, 18, 13]}, {"bounds": [116, 95, 117, 106], "color": [60, 103, 95]}, {"bounds": [184, 95, 203, 106], "color": [42, 54, 156]}, {"bounds": [210, 95, 217, 103], "color": [214, 68, 168]}, {"bounds": [121, 96, 124, 118], "color": [226, 66, 148]}, {"bounds": [160, 96, 177, 101], "color": [135, 13, 200]}, {"bounds": [233, 96, 237, 103], "color": [127, 73, 96]}, {"bounds": [246, 96, 253, 102], "color": [18, 240, 113]}, {"bounds": [118, 97, 133, 120], "color": [110, 250, 103]}, {"bounds": [149, 97, 154, 105], "color": [44, 186, 193]}, {"bounds": [201, 97, 209, 115], "color": [118, 102, 24]}, {"bounds": [213, 97, 221, 115], "color": [182, 114, 149]}, {"bounds": [224, 97, 231, 103], "color": [20, 107, 195]}, {"bounds": [233, 97, 242, 110], "color": [219, 74, 174]}, {"bounds": [120, 98, 125, 106], "color": [202, 218, 132]}, {"bounds": [133, 98, 138, 110], "color": [122, 156, 16]}, {"bounds": [245, 99, 253, 112], "color": [216, 69, 22]}, {"bounds": [186, 107, 189, 108], "color": [34, 152, 164]}, {"bounds": [257, 107, 260, 108], "color": [48, 42, 241]}, {"bounds": [167, 108, 219, 187], "color": [92, 62, 94]}, {"bounds": [145, 109, 152, 113], "color": [17, 67, 188]}, {"bounds": [55, 134, 160, 298], "color": [216, 148, 75]}, {"bounds": [115, 136, 146, 203], "color": [181, 237, 187]}, {"bounds": [109, 189, 113, 210], "color": [104, 199, 254]}, {"bounds": [103, 195, 108, 219], "color": [238, 221, 39]}, {"bounds": [92, 201, 102, 239], "color": [36, 61, 25]}, {"bounds": [117, 202, 137, 208], "color": [143, 211, 227]}, {"bounds": [55, 202, 299, 299], "color": [55, 223, 207]}, {"bounds": [107, 
210, 112, 218], "color": [135, 101, 149]}, {"bounds": [73, 213, 91, 269], "color": [1, 209, 145]}, {"bounds": [46, 234, 72, 299], "color": [215, 152, 183]}, {"bounds": [11, 263, 45, 299], "color": [45, 75, 161]}, ], "colors": [ {"color": [58, 205, 56], "name": "Bowl|-00.16|+01.50|-01.45"}, {"color": [209, 182, 193], "name": "Bowl"}, {"color": [226, 29, 217], "name": "Container|-00.16|+00.93|-02.94"}, {"color": [14, 114, 120], "name": "Container"}, {"color": [219, 14, 164], "name": "Ladel1.001"}, {"color": [138, 235, 7], "name": "Fridge|-00.22|00.00|-00.83"}, {"color": [91, 156, 207], "name": "Fridge1"}, {"color": [181, 237, 187], "name": "Cabinet|-00.35|+01.89|-03.29"}, {"color": [210, 149, 89], "name": "Drawer"}, {"color": [237, 189, 33], "name": "StoveBase1"}, {"color": [216, 148, 75], "name": "Cube.090"}, {"color": [117, 7, 236], "name": "Toaster|-00.16|+00.93|-01.45"}, {"color": [55, 33, 114], "name": "Toaster1"}, {"color": [215, 152, 183], "name": "Cabinet|-00.34|+01.89|-01.29"}, {"color": [44, 186, 193], "name": "Mug|-00.78|+00.93|-03.85"}, {"color": [8, 94, 186], "name": "CoffeeCup1"}, {"color": [122, 156, 16], "name": "Bottle5.001"}, {"color": [116, 220, 170], "name": "StoveKnob|-00.62|+00.90|-01.98"}, {"color": [106, 252, 95], "name": "StoveKnob2_Range4"}, {"color": [41, 198, 116], "name": "Spatula2.001"}, {"color": [119, 173, 49], "name": "Torus"}, {"color": [168, 12, 250], "name": "Cabinet|-01.01|+00.39|-03.37"}, {"color": [61, 44, 125], "name": "Microwave|-00.17|+01.49|-02.06"}, {"color": [54, 96, 202], "name": "Microwave4"}, {"color": [240, 130, 222], "name": "StoveBurner|-00.23|+00.93|-01.85"}, {"color": [156, 249, 101], "name": "GasStoveTop_Range1"}, {"color": [72, 78, 219], "name": "Sphere.010"}, {"color": [255, 102, 152], "name": "StoveBurner|-00.42|+00.93|-02.26"}, {"color": [248, 115, 142], "name": "StoveBurner|-00.23|+00.93|-02.26"}, {"color": [135, 13, 200], "name": "TurkeyPan.005"}, {"color": [45, 75, 161], "name": 
"Cabinet|-00.34|+02.11|-01.27"}, {"color": [92, 3, 233], "name": "Spatula1.002"}, {"color": [96, 50, 133], "name": "Towl1 (1)"}, {"color": [143, 211, 227], "name": "Cylinder.028"}, {"color": [108, 174, 95], "name": "Cube.085"}, {"color": [34, 152, 164], "name": "SugarJar.005"}, {"color": [96, 48, 36], "name": "Cabinet|-00.48|+00.78|-02.74"}, {"color": [131, 29, 70], "name": "Ladel3.001"}, {"color": [55, 223, 207], "name": "Ceiling"}, {"color": [102, 49, 87], "name": "Knife|-00.14|+01.12|-02.75"}, {"color": [211, 157, 122], "name": "Knife1"}, {"color": [177, 60, 44], "name": "Cube.100"}, {"color": [114, 84, 146], "name": "StoveKnob|-00.62|+00.90|-02.13"}, {"color": [60, 103, 95], "name": "Bottle3.001"}, {"color": [186, 206, 150], "name": "PaperRoll1"}, {"color": [164, 253, 150], "name": "Sphere.012"}, {"color": [77, 4, 136], "name": "Spatula1.001"}, {"color": [135, 101, 149], "name": "TurkeyPan.006"}, {"color": [237, 39, 71], "name": "Decals.002"}, {"color": [226, 66, 148], "name": "Bottle4.001"}, {"color": [246, 16, 151], "name": "StoveKnob|-00.62|+00.90|-01.83"}, {"color": [36, 91, 74], "name": "Tomato|-01.32|+00.93|-03.53"}, {"color": [119, 189, 121], "name": "Tomato"}, {"color": [193, 44, 202], "name": "Cabinet|-00.63|+00.39|-03.01"}, {"color": [118, 102, 24], "name": "SugarJar.004"}, {"color": [92, 62, 94], "name": "VenetianFrame"}, {"color": [14, 97, 183], "name": "Towl1"}, {"color": [87, 195, 41], "name": "GarbageCan|-00.36|00.00|-00.21"}, {"color": [225, 40, 55], "name": "GarbageCan"}, {"color": [110, 132, 248], "name": "CoffeeMachine|-02.65|+00.93|-03.57"}, {"color": [147, 71, 238], "name": "CoffeeMachine2"}, {"color": [214, 15, 78], "name": "Floor"}, {"color": [73, 64, 168], "name": "Room"}, {"color": [89, 77, 61], "name": "Cube.086"}, {"color": [127, 29, 203], "name": "Cube.082"}, {"color": [97, 134, 44], "name": "StoveTopDoor1"}, {"color": [140, 135, 166], "name": "Fork|-00.48|+00.81|-02.74"}, {"color": [54, 200, 25], "name": "Fork1"}, {"color": [185, 
225, 171], "name": "StoveKnob|-00.62|+00.90|-02.29"}, {"color": [91, 94, 10], "name": "Egg|-00.21|+00.27|-00.83"}, {"color": [240, 75, 163], "name": "Egg"}, {"color": [162, 203, 153], "name": "Mug|-00.53|+00.93|-01.58"}, {"color": [1, 209, 145], "name": "Cabinet|-00.34|+02.11|-01.63"}, {"color": [104, 199, 254], "name": "Cabinet|-00.33|+01.89|-03.24"}, {"color": [29, 84, 249], "name": "Spoon|-00.50|+00.78|-01.45"}, {"color": [235, 57, 90], "name": "Spoon"}, {"color": [115, 3, 101], "name": "Decals.003"}, {"color": [71, 3, 53], "name": "Sphere.008"}, {"color": [191, 227, 85], "name": "Cabinet|-01.15|+00.78|-03.50"}, {"color": [238, 221, 39], "name": "Cabinet|-00.33|+01.89|-02.51"}, {"color": [18, 240, 113], "name": "SugarFill.006"}, {"color": [36, 61, 25], "name": "Cabinet|-00.34|+02.11|-02.50"}, {"color": [214, 68, 168], "name": "Mug|-01.63|+00.92|-03.74"}, {"color": [17, 67, 188], "name": "Outlet (1)"}, {"color": [66, 225, 0], "name": "ButterKnife|-00.43|+00.93|-02.60"}, {"color": [135, 147, 55], "name": "butterKnife"}, {"color": [115, 78, 181], "name": "StoveTopGas"}, {"color": [182, 114, 149], "name": "SugarJar.001"}, {"color": [139, 56, 140], "name": "StoveBottomDoor1"}, {"color": [202, 218, 132], "name": "Cube.109"}, {"color": [178, 18, 13], "name": "Apple|-01.49|+00.93|-03.50"}, {"color": [159, 98, 144], "name": "Apple"}, {"color": [20, 107, 195], "name": "SugarFill.001"}, {"color": [193, 221, 101], "name": "Plate|-00.15|+01.49|-02.73"}, {"color": [188, 154, 128], "name": "Plate"}, {"color": [55, 176, 84], "name": "Cabinet|-00.63|+00.39|-01.61"}, {"color": [145, 107, 85], "name": "Cabinet|-00.34|+02.11|-00.39"}, {"color": [138, 185, 132], "name": "SugarJar.003"}, {"color": [202, 210, 177], "name": "Bottle2.001"}, {"color": [141, 139, 54], "name": "Cabinet|-00.63|+00.39|-02.51"}, {"color": [96, 140, 59], "name": "Chair|-02.35|00.00|-03.60"}, {"color": [166, 13, 176], "name": "Chair5"}, {"color": [199, 148, 125], "name": "Bottle1.001"}, {"color": [34, 126, 70], 
"name": "ladel2.001"}, {"color": [48, 42, 241], "name": "SugarJar.006"}, {"color": [127, 73, 96], "name": "SugarFill.004"}, {"color": [219, 74, 174], "name": "Sugar.001"}, {"color": [216, 69, 22], "name": "SugarJar.002"}, {"color": [31, 88, 95], "name": "StoveBurner|-00.42|+00.93|-01.85"}, {"color": [193, 143, 140], "name": "Outlet"}, {"color": [97, 114, 178], "name": "Sphere.001"}, {"color": [56, 51, 197], "name": "Potato|-01.63|+00.93|-03.48"}, {"color": [187, 142, 9], "name": "Potato"}, {"color": [42, 54, 156], "name": "Bread|-01.33|+00.93|-03.71"}, {"color": [18, 150, 252], "name": "Bread"}, {"color": [195, 218, 223], "name": "Cabinet|-00.50|+00.78|-01.45"}, {"color": [34, 130, 237], "name": "Pot|-00.47|+00.08|-02.74"}, {"color": [132, 237, 87], "name": "Pot1"}, {"color": [110, 250, 103], "name": "Bottles.001"}, {"color": [4, 93, 193], "name": "Lettuce|-00.33|+00.74|-00.69"}, {"color": [203, 156, 88], "name": "Lettuce1"}, {"color": [241, 134, 252], "name": "Baseboard.020"}, {"color": [127, 127, 189], "name": "Pan|-00.68|+00.08|-03.27"}, {"color": [246, 212, 161], "name": "Pan1"}, {"color": [207, 119, 70], "name": "Spatula3.001"}, ], "errorCode": "", "errorMessage": "", "inventoryObjects": [], "lastAction": "RotateRight", "lastActionSuccess": True, "objects": [ { "bounds3D": [ -2.5750010013580322, 0.8563164472579956, -3.647000312805176, -1.5749990940093994, 0.9563164710998535, -3.3069992065429688, ], "cameraHorizon": 0.0, "distance": 3.6240997314453125, "isopen": False, "name": "Tabletop", "objectId": "TableTop|-02.08|+00.94|-03.62", "objectType": "TableTop", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.075000047683716, "y": 0.9433164596557617, "z": -3.622999906539917, }, "receptacle": True, "receptacleCount": 4, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 90.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.2521742284297943, 1.4949759244918823, -2.831829071044922, -0.05024271458387375, 
1.5067294836044312, -2.6298975944519043, ], "cameraHorizon": 0.0, "distance": 2.6035001277923584, "isopen": False, "name": "Plate", "objectId": "Plate|-00.15|+01.49|-02.73", "objectType": "Plate", "openable": False, "parentReceptacle": "Cabinet|-00.33|+01.89|-02.51", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.15120847523212433, "y": 1.494760513305664, "z": -2.730863332748413, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8580825328826904, -2.015467643737793, -0.576196014881134, 0.9382582902908325, -1.9353333711624146, ], "cameraHorizon": 0.0, "distance": 1.7323315143585205, "isopen": False, "name": "StoveKnob2_Range2", "objectId": "StoveKnob|-00.62|+00.90|-01.98", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8996000289916992, "z": -1.9753999710083008, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -1.3614451885223389, 0.9283196926116943, -3.5663928985595703, -1.2814817428588867, 0.9905622005462646, -3.486574649810791, ], "cameraHorizon": 0.0, "distance": 3.3262617588043213, "isopen": False, "name": "Tomato", "objectId": "Tomato|-01.32|+00.93|-03.53", "objectType": "Tomato", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.3221999406814575, "y": 0.9303702116012573, "z": -3.5262999534606934, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.7945087552070618, 0.07984550297260284, -3.400216579437256, -0.5677620768547058, 0.12984557449817657, -3.1494078636169434, ], "cameraHorizon": 0.0, 
"distance": 3.1552624702453613, "isopen": False, "name": "Pan1", "objectId": "Pan|-00.68|+00.08|-03.27", "objectType": "Pan", "openable": False, "parentReceptacle": "Cabinet|-00.63|+00.39|-03.01", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6810178160667419, "y": 0.08484554290771484, "z": -3.274834156036377, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": { "x": -6.1288878896448296e-06, "y": 280.44842529296875, "z": 1.398907170369057e-05, }, "visible": False, }, { "bounds3D": [ -0.21095620095729828, 0.9303669929504395, -2.992823362350464, -0.09956331551074982, 1.1846275329589844, -2.8814303874969482, ], "cameraHorizon": 0.0, "distance": 2.7526044845581055, "isopen": False, "name": "Container", "objectId": "Container|-00.16|+00.93|-02.94", "objectType": "Container", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.15525996685028076, "y": 0.9303703308105469, "z": -2.937127113342285, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.40836191177368164, 0.14085793495178223, -1.15748929977417, 0.030406057834625244, 1.7145073413848877, -0.5005106925964355, ], "cameraHorizon": 0.0, "distance": 1.2551215887069702, "isopen": False, "name": "Fridge1", "objectId": "Fridge|-00.22|00.00|-00.83", "objectType": "Fridge", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [ {"objectId": "Egg|-00.21|+00.27|-00.83", "pivotId": 0}, {"objectId": "Lettuce|-00.33|+00.74|-00.69", "pivotId": 1}, ], "position": { "x": -0.22300000488758087, "y": -0.0010000000474974513, "z": -0.8289999961853027, }, "receptacle": True, "receptacleCount": 6, "receptacleObjectIds": [ "Egg|-00.21|+00.27|-00.83", "Lettuce|-00.33|+00.74|-00.69", ], "rotation": {"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6255507469177246, 0.8067288994789124, 
-2.7551281452178955, -0.38278937339782715, 0.826447069644928, -2.7230093479156494, ], "cameraHorizon": 0.0, "distance": 2.509014844894409, "isopen": False, "name": "Fork1", "objectId": "Fork|-00.48|+00.81|-02.74", "objectType": "Fork", "openable": False, "parentReceptacle": "Cabinet|-00.48|+00.78|-02.74", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.48289254307746887, "y": 0.8116353750228882, "z": -2.7390687465667725, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.553860604763031, 0.2711416482925415, -0.4028606414794922, -0.16013938188552856, 0.6648629307746887, -0.00913935899734497, ], "cameraHorizon": 0.0, "distance": 1.0567800998687744, "isopen": False, "name": "GarbageCan", "objectId": "GarbageCan|-00.36|00.00|-00.21", "objectType": "GarbageCan", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.3569999933242798, "y": -3.196139175543067e-08, "z": -0.20600000023841858, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.8528260588645935, 0.9309259057044983, -3.9095852375030518, -0.714918315410614, 1.0337982177734375, -3.7689216136932373, ], "cameraHorizon": 0.0, "distance": 3.6004319190979004, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-00.78|+00.93|-03.85", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.7749999761581421, "y": 0.9301429986953735, "z": -3.8499999046325684, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 50.4573860168457, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.19851021468639374, 0.9635931253433228, -2.7536282539367676, -0.09219704568386078, 1.3012911081314087, -2.7334327697753906, ], 
"cameraHorizon": 0.0, "distance": 2.5751969814300537, "isopen": False, "name": "Knife1", "objectId": "Knife|-00.14|+01.12|-02.75", "objectType": "Knife", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.14190000295639038, "y": 1.117300033569336, "z": -2.7486000061035156, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 10.637146949768066, "y": 274.3685607910156, "z": 270.0}, "visible": False, }, { "bounds3D": [ -0.5118284225463867, 0.9333651065826416, -1.9365284442901611, -0.3299715518951416, 0.9572690725326538, -1.754671573638916, ], "cameraHorizon": 0.0, "distance": 1.629948377609253, "isopen": False, "name": "GasStoveTop_Range1", "objectId": "StoveBurner|-00.42|+00.93|-01.85", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.42089998722076416, "y": 0.9301429986953735, "z": -1.8456000089645386, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.2595430612564087, 1.4952101707458496, -1.5506460666656494, -0.06338601559400558, 1.5541222095489502, -1.3544890880584717, ], "cameraHorizon": 0.0, "distance": 1.4347065687179565, "isopen": False, "name": "Bowl", "objectId": "Bowl|-00.16|+01.50|-01.45", "objectType": "Bowl", "openable": False, "parentReceptacle": "Cabinet|-00.34|+01.89|-01.29", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.16146452724933624, "y": 1.495596170425415, "z": -1.45256769657135, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6566448211669922, 0.8584824800491333, -2.3290677070617676, -0.5764960050582886, 0.9386582374572754, -2.2489333152770996, ], "cameraHorizon": 0.0, "distance": 2.0448336601257324, "isopen": False, 
"name": "StoveKnob2_Range4", "objectId": "StoveKnob|-00.62|+00.90|-02.29", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6179999709129333, "y": 0.8999999761581421, "z": -2.2890000343322754, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -0.2558910846710205, 0.9301429390907288, -1.6137478351593018, -0.0713789314031601, 1.1241569519042969, -1.2920067310333252, ], "cameraHorizon": 0.0, "distance": 1.3391128778457642, "isopen": False, "name": "Toaster1", "objectId": "Toaster|-00.16|+00.93|-01.45", "objectType": "Toaster", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.1636350154876709, "y": 0.9301429986953735, "z": -1.4528772830963135, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.665656328201294, 0.924782931804657, -3.7827463150024414, -1.5564723014831543, 1.0276552438735962, -3.6940536499023438, ], "cameraHorizon": 0.0, "distance": 3.596900701522827, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-01.63|+00.92|-03.74", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.625, "y": 0.9240000247955322, "z": -3.7383999824523926, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 180.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.29263991117477417, 1.5244276523590088, -2.8414499759674072, -0.16177701950073242, 2.2490928173065186, -2.5138638019561768, ], "cameraHorizon": 0.0, "distance": 2.4750850200653076, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.33|+01.89|-02.51", "objectType": "Cabinet", "openable": True, "parentReceptacle": 
"", "pickupable": False, "pivotSimObjs": [{"objectId": "Plate|-00.15|+01.49|-02.73", "pivotId": 0}], "position": { "x": -0.3272084593772888, "y": 1.8867602348327637, "z": -2.5138635635375977, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Plate|-00.15|+01.49|-02.73"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6222020983695984, 0.7248871326446533, -1.614982008934021, -0.6195090413093567, 0.8706167936325073, -1.2865678071975708, ], "cameraHorizon": 0.0, "distance": 1.2426241636276245, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-00.50|+00.78|-01.45", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Spoon|-00.50|+00.78|-01.45", "pivotId": 0}], "position": { "x": -0.5008437633514404, "y": 0.7795612812042236, "z": -1.450774908065796, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Spoon|-00.50|+00.78|-01.45"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5953136682510376, 0.09301626682281494, -1.6149822473526, -0.4644508361816406, 0.6846745014190674, -1.3194092512130737, ], "cameraHorizon": 0.0, "distance": 1.4923365116119385, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-01.61", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6298819780349731, "y": 0.3888453245162964, "z": -1.6149822473526, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.2881675958633423, 0.7248872518539429, -3.3793442249298096, -1.0107892751693726, 0.8706167936325073, -3.376683473587036, ], "cameraHorizon": 0.0, "distance": 3.2784500122070312, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-01.15|+00.78|-03.50", "objectType": "Cabinet", 
"openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -1.1494783163070679, "y": 0.7825552225112915, "z": -3.4980251789093018, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -3.5819432735443115, 0.09301620721817017, -3.3748939037323, -0.9107897281646729, 0.6846743822097778, -3.362663507461548, ], "cameraHorizon": 0.0, "distance": 3.185004711151123, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-01.01|+00.39|-03.37", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -1.010789155960083, "y": 0.3888453245162964, "z": -3.368778705596924, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.8397345542907715, 0.09301596879959106, -3.5855960845947266, -0.3782111406326294, 0.6846745014190674, -3.124072551727295, ], "cameraHorizon": 0.0, "distance": 2.823883056640625, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-03.01", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Pan|-00.68|+00.08|-03.27", "pivotId": 0}], "position": { "x": -0.6330178380012512, "y": 0.3888453245162964, "z": -3.0088343620300293, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Pan|-00.68|+00.08|-03.27"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5953132510185242, 0.09301614761352539, -2.9192330837249756, -0.4644504189491272, 0.6846743822097778, -2.5138638019561768, ], "cameraHorizon": 0.0, "distance": 2.342855215072632, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-02.51", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", 
"pickupable": False, "pivotSimObjs": [{"objectId": "Pot|-00.47|+00.08|-02.74", "pivotId": 0}], "position": { "x": -0.6298820972442627, "y": 0.3888453245162964, "z": -2.5138638019561768, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Pot|-00.47|+00.08|-02.74"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6035346984863281, 0.7248871326446533, -2.9642739295959473, -0.6004599332809448, 0.8706167936325073, -2.5138635635375977, ], "cameraHorizon": 0.0, "distance": 2.5116219520568848, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-00.48|+00.78|-02.74", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Fork|-00.48|+00.81|-02.74", "pivotId": 0}], "position": { "x": -0.4819878041744232, "y": 0.777635395526886, "z": -2.7390687465667725, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Fork|-00.48|+00.81|-02.74"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6152604818344116, 1.5292630195617676, -3.8681092262268066, -0.15373694896697998, 2.2539286613464355, -3.406585216522217, ], "cameraHorizon": 0.0, "distance": 3.2024600505828857, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.35|+01.89|-03.29", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.34654390811920166, "y": 1.8915960788726807, "z": -3.2933475971221924, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3028959631919861, 1.5292634963989258, -1.5821408033370972, -0.17203307151794434, 2.2539284229278564, -1.2865678071975708, ], "cameraHorizon": 0.0, "distance": 1.4407174587249756, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+01.89|-01.29", "objectType": "Cabinet", 
"openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Bowl|-00.16|+01.50|-01.45", "pivotId": 0}], "position": { "x": -0.33746451139450073, "y": 1.8915960788726807, "z": -1.2865678071975708, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Bowl|-00.16|+01.50|-01.45"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.33359596133232117, 1.9445738792419434, -2.497605323791504, -0.20273306965827942, 2.275726795196533, -2.12178373336792, ], "cameraHorizon": 0.0, "distance": 2.549344301223755, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-02.50", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -2.497605323791504, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.33359596133232117, 1.9445738792419434, -2.0148353576660156, -0.20273306965827942, 2.275726795196533, -1.631803035736084, ], "cameraHorizon": 0.0, "distance": 1.8321586847305298, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-01.63", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -1.6318029165267944, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.334695965051651, 1.9445741176605225, -1.2722522020339966, -0.20383307337760925, 2.275726556777954, -0.909758448600769, ], "cameraHorizon": 0.0, "distance": 1.5787419080734253, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-01.27", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", 
"pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -1.2722522020339966, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.334695965051651, 1.9445738792419434, -0.7808091640472412, -0.20383307337760925, 2.275726795196533, -0.3908956050872803, ], "cameraHorizon": 0.0, "distance": 1.2113124132156372, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-00.39", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -0.39089563488960266, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.29263991117477417, 1.524427890777588, -3.242128849029541, -0.16177701950073242, 2.2490928173065186, -2.9145426750183105, ], "cameraHorizon": 0.0, "distance": 3.1549649238586426, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.33|+01.89|-03.24", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.3272084593772888, "y": 1.8867603540420532, "z": -3.24212908744812, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.0901057720184326, 0.7320617437362671, -3.888105630874634, -0.12189435958862305, 0.952538251876831, -2.9198944568634033, ], "cameraHorizon": 0.0, "distance": 3.1575143337249756, "isopen": False, "name": "Sink", "objectId": "Sink|-00.61|+00.94|-03.40", "objectType": "Sink", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6060000061988831, "y": 0.9419999718666077, "z": 
-3.4040000438690186, }, "receptacle": True, "receptacleCount": 4, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 44.999996185302734, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.24254396557807922, 0.2711706757545471, -0.8578107357025146, -0.18492531776428223, 0.3472771644592285, -0.8001892566680908, ], "cameraHorizon": 0.0, "distance": 1.06029212474823, "isopen": False, "name": "Egg", "objectId": "Egg|-00.21|+00.27|-00.83", "objectType": "Egg", "openable": False, "parentReceptacle": "Fridge|-00.22|00.00|-00.83", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.2137332558631897, "y": 0.2719060778617859, "z": -0.8289999961853027, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.5313434600830078, 0.9396243691444397, -3.5390284061431885, -1.444072961807251, 1.0310288667678833, -3.452800989151001, ], "cameraHorizon": 0.0, "distance": 3.3288652896881104, "isopen": False, "name": "Apple", "objectId": "Apple|-01.49|+00.93|-03.50", "objectType": "Apple", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.4870775938034058, "y": 0.9303702116012573, "z": -3.495858669281006, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.42987868189811707, 0.7445617914199829, -0.7644813060760498, -0.27457037568092346, 0.8978313207626343, -0.614234447479248, ], "cameraHorizon": 0.0, "distance": 0.7373902201652527, "isopen": False, "name": "Lettuce1", "objectId": "Lettuce|-00.33|+00.74|-00.69", "objectType": "Lettuce", "openable": False, "parentReceptacle": "Fridge|-00.22|00.00|-00.83", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.2137332707643509, "y": 0.7358768582344055, "z": -0.6933581233024597, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": 
{"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8579825162887573, -1.8734675645828247, -0.576196014881134, 0.9381582736968994, -1.7933334112167358, ], "cameraHorizon": 0.0, "distance": 1.590955376625061, "isopen": False, "name": "StoveKnob2_Range1", "objectId": "StoveKnob|-00.62|+00.90|-01.83", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8995000123977661, "z": -1.833400011062622, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -0.6007806062698364, 0.9309259057044983, -1.624263048171997, -0.4915965795516968, 1.0337982177734375, -1.5355703830718994, ], "cameraHorizon": 0.0, "distance": 1.3485466241836548, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-00.53|+00.93|-01.58", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.5322529077529907, "y": 0.9301429986953735, "z": -1.5799167156219482, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3178284764289856, 0.9333651065826416, -2.3485283851623535, -0.1359715461730957, 0.9572690725326538, -2.1666717529296875, ], "cameraHorizon": 0.0, "distance": 2.0752294063568115, "isopen": False, "name": "GasStoveTop_Range3", "objectId": "StoveBurner|-00.23|+00.93|-02.26", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.22689999639987946, "y": 0.9301429986953735, "z": -2.2576000690460205, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5608127117156982, 
0.9253336787223816, -2.6081254482269287, -0.2908085584640503, 0.9346393942832947, -2.578345537185669, ], "cameraHorizon": 0.0, "distance": 2.369608163833618, "isopen": False, "name": "butterKnife", "objectId": "ButterKnife|-00.43|+00.93|-02.60", "objectType": "ButterKnife", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.4278929829597473, "y": 0.9303703904151917, "z": -2.5970890522003174, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.4711631536483765, 0.9296106696128845, -3.788638114929199, -1.1927717924118042, 1.0843539237976074, -3.621340751647949, ], "cameraHorizon": 0.0, "distance": 3.504027843475342, "isopen": False, "name": "Bread", "objectId": "Bread|-01.33|+00.93|-03.71", "objectType": "Bread", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.3320000171661377, "y": 0.9303702712059021, "z": -3.7049999237060547, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 6.309757232666016, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8581824898719788, -2.1692676544189453, -0.576196014881134, 0.9383582472801208, -2.0891332626342773, ], "cameraHorizon": 0.0, "distance": 1.8855619430541992, "isopen": False, "name": "StoveKnob2_Range3", "objectId": "StoveKnob|-00.62|+00.90|-02.13", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8996999859809875, "z": -2.129199981689453, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -1.6801782846450806, 0.9300780892372131, -3.5211691856384277, -1.5957564115524292, 1.001486897468567, 
-3.4346466064453125, ], "cameraHorizon": 0.0, "distance": 3.3443284034729004, "isopen": False, "name": "Potato", "objectId": "Potato|-01.63|+00.93|-03.48", "objectType": "Potato", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.6319999694824219, "y": 0.9303702116012573, "z": -3.475545883178711, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3178284764289856, 0.9333651065826416, -1.9365284442901611, -0.1359715461730957, 0.9572690725326538, -1.754671573638916, ], "cameraHorizon": 0.0, "distance": 1.6798983812332153, "isopen": False, "name": "GasStoveTop_Range2", "objectId": "StoveBurner|-00.23|+00.93|-01.85", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.22689999639987946, "y": 0.9301429986953735, "z": -1.8456000089645386, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -2.784135103225708, 0.9281330108642578, -3.721567153930664, -2.5158650875091553, 1.3016245365142822, -3.4185357093811035, ], "cameraHorizon": 0.0, "distance": 3.8290257453918457, "isopen": False, "name": "CoffeeMachine2", "objectId": "CoffeeMachine|-02.65|+00.93|-03.57", "objectType": "CoffeeMachine", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.6500000953674316, "y": 0.9303701519966125, "z": -3.5739998817443848, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6211026906967163, 0.7797816395759583, -1.4715903997421265, -0.41446253657341003, 0.7992590069770813, -1.4300788640975952, ], "cameraHorizon": 0.0, "distance": 1.2420284748077393, "isopen": False, "name": "Spoon", "objectId": 
"Spoon|-00.50|+00.78|-01.45", "objectType": "Spoon", "openable": False, "parentReceptacle": "Cabinet|-00.50|+00.78|-01.45", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.4998437762260437, "y": 0.784561276435852, "z": -1.450774908065796, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5118284225463867, 0.9333651065826416, -2.3485283851623535, -0.3299715518951416, 0.9572690725326538, -2.1666717529296875, ], "cameraHorizon": 0.0, "distance": 2.035006284713745, "isopen": False, "name": "GasStoveTop_Range4", "objectId": "StoveBurner|-00.42|+00.93|-02.26", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.42089998722076416, "y": 0.9301429986953735, "z": -2.2576000690460205, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5738816261291504, 0.0948454737663269, -2.837768316268921, -0.37388163805007935, 0.2948455214500427, -2.637768030166626, ], "cameraHorizon": 0.0, "distance": 2.6583845615386963, "isopen": False, "name": "Pot1", "objectId": "Pot|-00.47|+00.08|-02.74", "objectType": "Pot", "openable": False, "parentReceptacle": "Cabinet|-00.63|+00.39|-02.51", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.4738820791244507, "y": 0.08484548330307007, "z": -2.737863779067993, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -2.613636016845703, 0.0006269514560699463, -3.853076219558716, -2.085458755493164, 0.874946117401123, -3.286182165145874, ], "cameraHorizon": 0.0, "distance": 3.8430612087249756, "isopen": False, "name": "Chair5", "objectId": "Chair|-02.35|00.00|-03.60", "objectType": "Chair", "openable": 
False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.3540000915527344, "y": -5.653919288306497e-07, "z": -3.6019999980926514, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 74.2330551147461, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3505246043205261, 1.5073667764663696, -2.2319486141204834, 0.009090721607208252, 1.8599165678024292, -1.720513105392456, ], "cameraHorizon": 0.0, "distance": 1.961709976196289, "isopen": False, "name": "Microwave4", "objectId": "Microwave|-00.17|+01.49|-02.06", "objectType": "Microwave", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.1746000051498413, "y": 1.485553503036499, "z": -2.055999994277954, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, ], "sceneName": "FloorPlan28", "screenHeight": 300, "screenWidth": 300, } metadata_simple = { "agent": { "bounds3D": [], "cameraHorizon": 0.0, "distance": 0.0, "isopen": False, "name": "agent", "objectId": "", "objectType": "", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": {"x": -0.75, "y": 1.0, "z": -0.25}, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, "agentId": 0, "thirdPartyCameras": [], "collided": False, "collidedObjects": [], "colorBounds": [], "colors": [], "errorCode": "", "errorMessage": "", "inventoryObjects": [], "lastAction": "", "lastActionSuccess": False, "objects": [ { "bounds3D": [ -2.5750010013580322, 0.8563164472579956, -3.647000312805176, -1.5749990940093994, 0.9563164710998535, -3.3069992065429688, ], "cameraHorizon": 0.0, "distance": 3.6243574619293213, "isopen": False, "name": "Tabletop", "objectId": "TableTop|-02.08|+00.94|-03.62", "objectType": "TableTop", "openable": False, 
"parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.075000047683716, "y": 0.9433164596557617, "z": -3.622999906539917, }, "receptacle": True, "receptacleCount": 4, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 90.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.2521742284297943, 1.4949759244918823, -2.831829071044922, -0.05024271458387375, 1.5067294836044312, -2.6298975944519043, ], "cameraHorizon": 0.0, "distance": 2.5996196269989014, "isopen": False, "name": "Plate", "objectId": "Plate|-00.15|+01.49|-02.73", "objectType": "Plate", "openable": False, "parentReceptacle": "Cabinet|-00.33|+01.89|-02.51", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.15120847523212433, "y": 1.494760513305664, "z": -2.730863332748413, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8580825328826904, -2.015467643737793, -0.576196014881134, 0.9382582902908325, -1.9353333711624146, ], "cameraHorizon": 0.0, "distance": 1.7333749532699585, "isopen": False, "name": "StoveKnob2_Range2", "objectId": "StoveKnob|-00.62|+00.90|-01.98", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8996000289916992, "z": -1.9753999710083008, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -1.3614451885223389, 0.9283196926116943, -3.5663928985595703, -1.2814817428588867, 0.9905622005462646, -3.486574649810791, ], "cameraHorizon": 0.0, "distance": 3.32662034034729, "isopen": False, "name": "Tomato", "objectId": "Tomato|-01.32|+00.93|-03.53", "objectType": "Tomato", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { 
"x": -1.3221999406814575, "y": 0.9303702116012573, "z": -3.5262999534606934, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.7945087552070618, 0.07984550297260284, -3.400216579437256, -0.5677620768547058, 0.12984557449817657, -3.1494078636169434, ], "cameraHorizon": 0.0, "distance": 3.1609947681427, "isopen": False, "name": "Pan1", "objectId": "Pan|-00.68|+00.08|-03.27", "objectType": "Pan", "openable": False, "parentReceptacle": "Cabinet|-00.63|+00.39|-03.01", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6810178160667419, "y": 0.08484554290771484, "z": -3.274834156036377, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": { "x": -6.1288878896448296e-06, "y": 280.44842529296875, "z": 1.398907170369057e-05, }, "visible": False, }, { "bounds3D": [ -0.21095620095729828, 0.9303669929504395, -2.992823362350464, -0.09956331551074982, 1.1846275329589844, -2.8814303874969482, ], "cameraHorizon": 0.0, "distance": 2.753037691116333, "isopen": False, "name": "Container", "objectId": "Container|-00.16|+00.93|-02.94", "objectType": "Container", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.15525996685028076, "y": 0.9303703308105469, "z": -2.937127113342285, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.40836191177368164, 0.14085793495178223, -1.15748929977417, 0.030406057834625244, 1.7145073413848877, -0.5005106925964355, ], "cameraHorizon": 0.0, "distance": 1.270815134048462, "isopen": False, "name": "Fridge1", "objectId": "Fridge|-00.22|00.00|-00.83", "objectType": "Fridge", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [ {"objectId": "Egg|-00.21|+00.27|-00.83", "pivotId": 0}, {"objectId": 
"Lettuce|-00.33|+00.74|-00.69", "pivotId": 1}, ], "position": { "x": -0.22300000488758087, "y": -0.0010000000474974513, "z": -0.8289999961853027, }, "receptacle": True, "receptacleCount": 6, "receptacleObjectIds": [ "Egg|-00.21|+00.27|-00.83", "Lettuce|-00.33|+00.74|-00.69", ], "rotation": {"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6255507469177246, 0.8067288994789124, -2.7551281452178955, -0.38278937339782715, 0.826447069644928, -2.7230093479156494, ], "cameraHorizon": 0.0, "distance": 2.5104362964630127, "isopen": False, "name": "Fork1", "objectId": "Fork|-00.48|+00.81|-02.74", "objectType": "Fork", "openable": False, "parentReceptacle": "Cabinet|-00.48|+00.78|-02.74", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.48289254307746887, "y": 0.8116353750228882, "z": -2.7390687465667725, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.553860604763031, 0.2711416482925415, -0.4028606414794922, -0.16013938188552856, 0.6648629307746887, -0.00913935899734497, ], "cameraHorizon": 0.0, "distance": 1.0753535032272339, "isopen": False, "name": "GarbageCan", "objectId": "GarbageCan|-00.36|00.00|-00.21", "objectType": "GarbageCan", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.3569999933242798, "y": -3.196139175543067e-08, "z": -0.20600000023841858, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.8528260588645935, 0.9309259057044983, -3.9095852375030518, -0.714918315410614, 1.0337982177734375, -3.7689216136932373, ], "cameraHorizon": 0.0, "distance": 3.600764513015747, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-00.78|+00.93|-03.85", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, 
"pivotSimObjs": [], "position": { "x": -0.7749999761581421, "y": 0.9301429986953735, "z": -3.8499999046325684, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 50.4573860168457, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.19851021468639374, 0.9635931253433228, -2.7536282539367676, -0.09219704568386078, 1.3012911081314087, -2.7334327697753906, ], "cameraHorizon": 0.0, "distance": 2.5742080211639404, "isopen": False, "name": "Knife1", "objectId": "Knife|-00.14|+01.12|-02.75", "objectType": "Knife", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.14190000295639038, "y": 1.117300033569336, "z": -2.7486000061035156, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 10.637146949768066, "y": 274.3685607910156, "z": 270.0}, "visible": False, }, { "bounds3D": [ -0.5118284225463867, 0.9333651065826416, -1.9365284442901611, -0.3299715518951416, 0.9572690725326538, -1.754671573638916, ], "cameraHorizon": 0.0, "distance": 1.6306827068328857, "isopen": False, "name": "GasStoveTop_Range1", "objectId": "StoveBurner|-00.42|+00.93|-01.85", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.42089998722076416, "y": 0.9301429986953735, "z": -1.8456000089645386, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.2595430612564087, 1.4952101707458496, -1.5506460666656494, -0.06338601559400558, 1.5541222095489502, -1.3544890880584717, ], "cameraHorizon": 0.0, "distance": 1.4276409149169922, "isopen": False, "name": "Bowl", "objectId": "Bowl|-00.16|+01.50|-01.45", "objectType": "Bowl", "openable": False, "parentReceptacle": "Cabinet|-00.34|+01.89|-01.29", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.16146452724933624, "y": 
1.495596170425415, "z": -1.45256769657135, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6566448211669922, 0.8584824800491333, -2.3290677070617676, -0.5764960050582886, 0.9386582374572754, -2.2489333152770996, ], "cameraHorizon": 0.0, "distance": 2.0457139015197754, "isopen": False, "name": "StoveKnob2_Range4", "objectId": "StoveKnob|-00.62|+00.90|-02.29", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6179999709129333, "y": 0.8999999761581421, "z": -2.2890000343322754, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -0.2558910846710205, 0.9301429390907288, -1.6137478351593018, -0.0713789314031601, 1.1241569519042969, -1.2920067310333252, ], "cameraHorizon": 0.0, "distance": 1.3400065898895264, "isopen": False, "name": "Toaster1", "objectId": "Toaster|-00.16|+00.93|-01.45", "objectType": "Toaster", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.1636350154876709, "y": 0.9301429986953735, "z": -1.4528772830963135, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.665656328201294, 0.924782931804657, -3.7827463150024414, -1.5564723014831543, 1.0276552438735962, -3.6940536499023438, ], "cameraHorizon": 0.0, "distance": 3.5972678661346436, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-01.63|+00.92|-03.74", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.625, "y": 0.9240000247955322, "z": -3.7383999824523926, }, "receptacle": False, "receptacleCount": 0, 
"receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 180.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.29263991117477417, 1.5244276523590088, -2.8414499759674072, -0.16177701950073242, 2.2490928173065186, -2.5138638019561768, ], "cameraHorizon": 0.0, "distance": 2.4678280353546143, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.33|+01.89|-02.51", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Plate|-00.15|+01.49|-02.73", "pivotId": 0}], "position": { "x": -0.3272084593772888, "y": 1.8867602348327637, "z": -2.5138635635375977, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Plate|-00.15|+01.49|-02.73"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6222020983695984, 0.7248871326446533, -1.614982008934021, -0.6195090413093567, 0.8706167936325073, -1.2865678071975708, ], "cameraHorizon": 0.0, "distance": 1.2460066080093384, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-00.50|+00.78|-01.45", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Spoon|-00.50|+00.78|-01.45", "pivotId": 0}], "position": { "x": -0.5008437633514404, "y": 0.7795612812042236, "z": -1.450774908065796, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Spoon|-00.50|+00.78|-01.45"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5953136682510376, 0.09301626682281494, -1.6149822473526, -0.4644508361816406, 0.6846745014190674, -1.3194092512130737, ], "cameraHorizon": 0.0, "distance": 1.5003715753555298, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-01.61", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6298819780349731, "y": 0.3888453245162964, "z": -1.6149822473526, }, 
"receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.2881675958633423, 0.7248872518539429, -3.3793442249298096, -1.0107892751693726, 0.8706167936325073, -3.376683473587036, ], "cameraHorizon": 0.0, "distance": 3.2797152996063232, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-01.15|+00.78|-03.50", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -1.1494783163070679, "y": 0.7825552225112915, "z": -3.4980251789093018, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -3.5819432735443115, 0.09301620721817017, -3.3748939037323, -0.9107897281646729, 0.6846743822097778, -3.362663507461548, ], "cameraHorizon": 0.0, "distance": 3.188777446746826, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-01.01|+00.39|-03.37", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -1.010789155960083, "y": 0.3888453245162964, "z": -3.368778705596924, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.8397345542907715, 0.09301596879959106, -3.5855960845947266, -0.3782111406326294, 0.6846745014190674, -3.124072551727295, ], "cameraHorizon": 0.0, "distance": 2.8281376361846924, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-03.01", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Pan|-00.68|+00.08|-03.27", "pivotId": 0}], "position": { "x": -0.6330178380012512, "y": 0.3888453245162964, "z": -3.0088343620300293, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": 
["Pan|-00.68|+00.08|-03.27"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5953132510185242, 0.09301614761352539, -2.9192330837249756, -0.4644504189491272, 0.6846743822097778, -2.5138638019561768, ], "cameraHorizon": 0.0, "distance": 2.3479816913604736, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.63|+00.39|-02.51", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Pot|-00.47|+00.08|-02.74", "pivotId": 0}], "position": { "x": -0.6298820972442627, "y": 0.3888453245162964, "z": -2.5138638019561768, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Pot|-00.47|+00.08|-02.74"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6035346984863281, 0.7248871326446533, -2.9642739295959473, -0.6004599332809448, 0.8706167936325073, -2.5138635635375977, ], "cameraHorizon": 0.0, "distance": 2.513312578201294, "isopen": False, "name": "Drawer", "objectId": "Cabinet|-00.48|+00.78|-02.74", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Fork|-00.48|+00.81|-02.74", "pivotId": 0}], "position": { "x": -0.4819878041744232, "y": 0.777635395526886, "z": -2.7390687465667725, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Fork|-00.48|+00.81|-02.74"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6152604818344116, 1.5292630195617676, -3.8681092262268066, -0.15373694896697998, 2.2539286613464355, -3.406585216522217, ], "cameraHorizon": 0.0, "distance": 3.196824073791504, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.35|+01.89|-03.29", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.34654390811920166, "y": 1.8915960788726807, "z": -3.2933475971221924, }, 
"receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3028959631919861, 1.5292634963989258, -1.5821408033370972, -0.17203307151794434, 2.2539284229278564, -1.2865678071975708, ], "cameraHorizon": 0.0, "distance": 1.428146243095398, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+01.89|-01.29", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [{"objectId": "Bowl|-00.16|+01.50|-01.45", "pivotId": 0}], "position": { "x": -0.33746451139450073, "y": 1.8915960788726807, "z": -1.2865678071975708, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": ["Bowl|-00.16|+01.50|-01.45"], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.33359596133232117, 1.9445738792419434, -2.497605323791504, -0.20273306965827942, 2.275726795196533, -2.12178373336792, ], "cameraHorizon": 0.0, "distance": 2.540541172027588, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-02.50", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -2.497605323791504, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.33359596133232117, 1.9445738792419434, -2.0148353576660156, -0.20273306965827942, 2.275726795196533, -1.631803035736084, ], "cameraHorizon": 0.0, "distance": 1.8198896646499634, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-01.63", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -1.6318029165267944, }, "receptacle": True, "receptacleCount": 
1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.334695965051651, 1.9445741176605225, -1.2722522020339966, -0.20383307337760925, 2.275726556777954, -0.909758448600769, ], "cameraHorizon": 0.0, "distance": 1.5644868612289429, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-01.27", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -1.2722522020339966, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.334695965051651, 1.9445738792419434, -0.7808091640472412, -0.20383307337760925, 2.275726795196533, -0.3908956050872803, ], "cameraHorizon": 0.0, "distance": 1.1926738023757935, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.34|+02.11|-00.39", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.33746451139450073, "y": 2.1101503372192383, "z": -0.39089563488960266, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.29263991117477417, 1.524427890777588, -3.242128849029541, -0.16177701950073242, 2.2490928173065186, -2.9145426750183105, ], "cameraHorizon": 0.0, "distance": 3.149275064468384, "isopen": False, "name": "Cabinet", "objectId": "Cabinet|-00.33|+01.89|-03.24", "objectType": "Cabinet", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.3272084593772888, "y": 1.8867603540420532, "z": -3.24212908744812, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ 
-1.0901057720184326, 0.7320617437362671, -3.888105630874634, -0.12189435958862305, 0.952538251876831, -2.9198944568634033, ], "cameraHorizon": 0.0, "distance": 3.15781831741333, "isopen": False, "name": "Sink", "objectId": "Sink|-00.61|+00.94|-03.40", "objectType": "Sink", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6060000061988831, "y": 0.9419999718666077, "z": -3.4040000438690186, }, "receptacle": True, "receptacleCount": 4, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 44.999996185302734, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.24254396557807922, 0.2711706757545471, -0.8578107357025146, -0.18492531776428223, 0.3472771644592285, -0.8001892566680908, ], "cameraHorizon": 0.0, "distance": 1.0737521648406982, "isopen": False, "name": "Egg", "objectId": "Egg|-00.21|+00.27|-00.83", "objectType": "Egg", "openable": False, "parentReceptacle": "Fridge|-00.22|00.00|-00.83", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.2137332558631897, "y": 0.2719060778617859, "z": -0.8289999961853027, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.5313434600830078, 0.9396243691444397, -3.5390284061431885, -1.444072961807251, 1.0310288667678833, -3.452800989151001, ], "cameraHorizon": 0.0, "distance": 3.3292236328125, "isopen": False, "name": "Apple", "objectId": "Apple|-01.49|+00.93|-03.50", "objectType": "Apple", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.4870775938034058, "y": 0.9303702116012573, "z": -3.495858669281006, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.42987868189811707, 0.7445617914199829, -0.7644813060760498, -0.27457037568092346, 0.8978313207626343, -0.614234447479248, ], "cameraHorizon": 
0.0, "distance": 0.7442509531974792, "isopen": False, "name": "Lettuce1", "objectId": "Lettuce|-00.33|+00.74|-00.69", "objectType": "Lettuce", "openable": False, "parentReceptacle": "Fridge|-00.22|00.00|-00.83", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.2137332707643509, "y": 0.7358768582344055, "z": -0.6933581233024597, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 270.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8579825162887573, -1.8734675645828247, -0.576196014881134, 0.9381582736968994, -1.7933334112167358, ], "cameraHorizon": 0.0, "distance": 1.5920926332473755, "isopen": False, "name": "StoveKnob2_Range1", "objectId": "StoveKnob|-00.62|+00.90|-01.83", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8995000123977661, "z": -1.833400011062622, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -0.6007806062698364, 0.9309259057044983, -1.624263048171997, -0.4915965795516968, 1.0337982177734375, -1.5355703830718994, ], "cameraHorizon": 0.0, "distance": 1.3494340181350708, "isopen": False, "name": "CoffeeCup1", "objectId": "Mug|-00.53|+00.93|-01.58", "objectType": "Mug", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.5322529077529907, "y": 0.9301429986953735, "z": -1.5799167156219482, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3178284764289856, 0.9333651065826416, -2.3485283851623535, -0.1359715461730957, 0.9572690725326538, -2.1666717529296875, ], "cameraHorizon": 0.0, "distance": 2.0758063793182373, "isopen": False, "name": "GasStoveTop_Range3", 
"objectId": "StoveBurner|-00.23|+00.93|-02.26", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.22689999639987946, "y": 0.9301429986953735, "z": -2.2576000690460205, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5608127117156982, 0.9253336787223816, -2.6081254482269287, -0.2908085584640503, 0.9346393942832947, -2.578345537185669, ], "cameraHorizon": 0.0, "distance": 2.3701114654541016, "isopen": False, "name": "butterKnife", "objectId": "ButterKnife|-00.43|+00.93|-02.60", "objectType": "ButterKnife", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.4278929829597473, "y": 0.9303703904151917, "z": -2.5970890522003174, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -1.4711631536483765, 0.9296106696128845, -3.788638114929199, -1.1927717924118042, 1.0843539237976074, -3.621340751647949, ], "cameraHorizon": 0.0, "distance": 3.504368305206299, "isopen": False, "name": "Bread", "objectId": "Bread|-01.33|+00.93|-03.71", "objectType": "Bread", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.3320000171661377, "y": 0.9303702712059021, "z": -3.7049999237060547, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 6.309757232666016, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6563448309898376, 0.8581824898719788, -2.1692676544189453, -0.576196014881134, 0.9383582472801208, -2.0891332626342773, ], "cameraHorizon": 0.0, "distance": 1.8865195512771606, "isopen": False, "name": "StoveKnob2_Range3", "objectId": "StoveKnob|-00.62|+00.90|-02.13", "objectType": "StoveKnob", "openable": False, "parentReceptacle": "", 
"pickupable": False, "pivotSimObjs": [], "position": { "x": -0.6176999807357788, "y": 0.8996999859809875, "z": -2.129199981689453, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406}, "visible": False, }, { "bounds3D": [ -1.6801782846450806, 0.9300780892372131, -3.5211691856384277, -1.5957564115524292, 1.001486897468567, -3.4346466064453125, ], "cameraHorizon": 0.0, "distance": 3.3446850776672363, "isopen": False, "name": "Potato", "objectId": "Potato|-01.63|+00.93|-03.48", "objectType": "Potato", "openable": False, "parentReceptacle": "", "pickupable": True, "pivotSimObjs": [], "position": { "x": -1.6319999694824219, "y": 0.9303702116012573, "z": -3.475545883178711, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3178284764289856, 0.9333651065826416, -1.9365284442901611, -0.1359715461730957, 0.9572690725326538, -1.754671573638916, ], "cameraHorizon": 0.0, "distance": 1.6806108951568604, "isopen": False, "name": "GasStoveTop_Range2", "objectId": "StoveBurner|-00.23|+00.93|-01.85", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.22689999639987946, "y": 0.9301429986953735, "z": -1.8456000089645386, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -2.784135103225708, 0.9281330108642578, -3.721567153930664, -2.5158650875091553, 1.3016245365142822, -3.4185357093811035, ], "cameraHorizon": 0.0, "distance": 3.8293373584747314, "isopen": False, "name": "CoffeeMachine2", "objectId": "CoffeeMachine|-02.65|+00.93|-03.57", "objectType": "CoffeeMachine", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.6500000953674316, "y": 
0.9303701519966125, "z": -3.5739998817443848, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.6211026906967163, 0.7797816395759583, -1.4715903997421265, -0.41446253657341003, 0.7992590069770813, -1.4300788640975952, ], "cameraHorizon": 0.0, "distance": 1.2453322410583496, "isopen": False, "name": "Spoon", "objectId": "Spoon|-00.50|+00.78|-01.45", "objectType": "Spoon", "openable": False, "parentReceptacle": "Cabinet|-00.50|+00.78|-01.45", "pickupable": True, "pivotSimObjs": [], "position": { "x": -0.4998437762260437, "y": 0.784561276435852, "z": -1.450774908065796, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5118284225463867, 0.9333651065826416, -2.3485283851623535, -0.3299715518951416, 0.9572690725326538, -2.1666717529296875, ], "cameraHorizon": 0.0, "distance": 2.0355944633483887, "isopen": False, "name": "GasStoveTop_Range4", "objectId": "StoveBurner|-00.42|+00.93|-02.26", "objectType": "StoveBurner", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.42089998722076416, "y": 0.9301429986953735, "z": -2.2576000690460205, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.5738816261291504, 0.0948454737663269, -2.837768316268921, -0.37388163805007935, 0.2948455214500427, -2.637768030166626, ], "cameraHorizon": 0.0, "distance": 2.6651856899261475, "isopen": False, "name": "Pot1", "objectId": "Pot|-00.47|+00.08|-02.74", "objectType": "Pot", "openable": False, "parentReceptacle": "Cabinet|-00.63|+00.39|-02.51", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.4738820791244507, "y": 0.08484548330307007, "z": -2.737863779067993, }, "receptacle": True, 
"receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0}, "visible": False, }, { "bounds3D": [ -2.613636016845703, 0.0006269514560699463, -3.853076219558716, -2.085458755493164, 0.874946117401123, -3.286182165145874, ], "cameraHorizon": 0.0, "distance": 3.848210096359253, "isopen": False, "name": "Chair5", "objectId": "Chair|-02.35|00.00|-03.60", "objectType": "Chair", "openable": False, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -2.3540000915527344, "y": -5.653919288306497e-07, "z": -3.6019999980926514, }, "receptacle": False, "receptacleCount": 0, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 74.2330551147461, "z": 0.0}, "visible": False, }, { "bounds3D": [ -0.3505246043205261, 1.5073667764663696, -2.2319486141204834, 0.009090721607208252, 1.8599165678024292, -1.720513105392456, ], "cameraHorizon": 0.0, "distance": 1.9566510915756226, "isopen": False, "name": "Microwave4", "objectId": "Microwave|-00.17|+01.49|-02.06", "objectType": "Microwave", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.1746000051498413, "y": 1.485553503036499, "z": -2.055999994277954, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, }, ], "sceneName": "FloorPlan28", "screenHeight": 300, "screenWidth": 300, } @pytest.fixture def event_complex(): return Event(metadata_complex) @pytest.fixture def event(): return Event(metadata_simple) @pytest.fixture def event_with_frame(event): e = event with open(os.path.join(TESTS_DATA_DIR, "rgb-image.raw"), "rb") as f: raw_image = memoryview(f.read()) e.add_image(raw_image) return e def test_get_object(event): microwave = { "bounds3D": [ -0.3505246043205261, 1.5073667764663696, -2.2319486141204834, 0.009090721607208252, 1.8599165678024292, -1.720513105392456, ], "cameraHorizon": 0.0, "distance": 
1.9566510915756226, "isopen": False, "name": "Microwave4", "objectId": "Microwave|-00.17|+01.49|-02.06", "objectType": "Microwave", "openable": True, "parentReceptacle": "", "pickupable": False, "pivotSimObjs": [], "position": { "x": -0.1746000051498413, "y": 1.485553503036499, "z": -2.055999994277954, }, "receptacle": True, "receptacleCount": 1, "receptacleObjectIds": [], "rotation": {"x": 0.0, "y": 0.0, "z": 0.0}, "visible": False, } assert event.get_object("Microwave|-00.17|+01.49|-02.06") == microwave assert event.get_object("FOOO") is None def test_cv2img(event_with_frame): cvf = np.load(os.path.join(TESTS_DATA_DIR, "test-image1-bgr.npy")) assert event_with_frame.cv2img.shape == event_with_frame.frame.shape assert np.all(cvf == event_with_frame.cv2img) assert not np.all(event_with_frame.frame == event_with_frame.cv2img) def test_add_image(event): with open(os.path.join(TESTS_DATA_DIR, "rgb-image.raw"), "rb") as f: raw_image = memoryview(f.read()) f = np.load(os.path.join(TESTS_DATA_DIR, "test-image1-rgb.npy")) assert event.frame is None event.add_image(raw_image) assert event.frame.shape == (300, 300, 3) assert np.all(f == event.frame) def test_metadata(event): assert event.screen_height == 300 assert event.screen_width == 300 assert event.pose == (-750, -250, 0, 0) def test_objets_by_test(event): all_mugs = [o["objectId"] for o in event.objects_by_type("Mug")] mug_object_ids = [ "Mug|-00.78|+00.93|-03.85", "Mug|-01.63|+00.92|-03.74", "Mug|-00.53|+00.93|-01.58", ] assert all_mugs == mug_object_ids assert event.objects_by_type("FOO") == [] def test_process_colors(event_complex): event_complex.process_colors assert len(event_complex.color_to_object_id.keys()) == 125 assert event_complex.color_to_object_id[(207, 119, 70)] == "Spatula3.001" assert ( event_complex.color_to_object_id[(141, 139, 54)] == "Cabinet|-00.63|+00.39|-02.51" ) assert ( event_complex.color_to_object_id[(29, 84, 249)] == "Spoon|-00.50|+00.78|-01.45" ) assert 
event_complex.color_to_object_id[(235, 57, 90)] == "Spoon" assert event_complex.object_id_to_color["Spatula3.001"] == (207, 119, 70) assert event_complex.object_id_to_color["Cabinet|-00.63|+00.39|-02.51"] == ( 141, 139, 54, ) assert event_complex.object_id_to_color["Spoon|-00.50|+00.78|-01.45"] == ( 29, 84, 249, ) assert event_complex.object_id_to_color["Spoon"] == (235, 57, 90)
35.464637
87
0.430589
9,143
118,842
5.584053
0.102592
0.012261
0.029674
0.0455
0.856214
0.855822
0.853648
0.829086
0.827598
0.827598
0
0.334787
0.403898
118,842
3,350
88
35.475224
0.385901
0
0
0.801143
0
0
0.236743
0.046473
0
0
0
0
0.006619
1
0.002708
false
0
0.001504
0.000602
0.005114
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
be2670d8565e9eaf449d500cde28ee99708be4a1
8,533
py
Python
prior_utils/prepare_prior_cgenie.py
frodre/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
17
2018-08-27T18:50:36.000Z
2021-03-17T22:48:55.000Z
prior_utils/prepare_prior_cgenie.py
mingsongli/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
5
2018-10-15T22:13:27.000Z
2019-04-26T11:45:58.000Z
prior_utils/prepare_prior_cgenie.py
mingsongli/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
11
2018-10-11T19:35:34.000Z
2021-08-17T12:08:11.000Z
""" Module: prepare_prior_cgenie.py Purpose: Extract data from a set of variables from a cGENIE climate model simulation to generate files formatted for input into the LMR data assimilation system. Originator: Robert Tardif - University of Washington : August 2017 """ import os # ============================================================================== input_data_directory = '/home/disk/ekman/rtardif/kalman3/LMR/data/model/cgenie_petm/orig_files/' output_data_directory = '/home/disk/ekman/rtardif/kalman3/LMR/data/model/cgenie_petm/' # GENIE output files, input to this script file_2d_fields = input_data_directory+'fields_biogem_2d.nc' file_3d_fields = input_data_directory+'fields_biogem_3d.nc' #time_interval = 'ann' # for files with data every year time_interval = 'dec' # for files with data every decade #time_interval = 'cen' # for files with data every century # ============================================================================== FillVal = 9.96920996839e+36 # --------------------------------------------------------------------- # 1) extract near-surface air temperature from GENIE 2D file (atm_temp) # --------------------------------------------------------------------- lmr_file = output_data_directory+'tas_sfc_A%s_cgenie_petm.nc' %(time_interval) genie_variable = 'atm_temp' # extract the variable command = 'ncks -v %s %s %s' %(genie_variable, file_2d_fields,lmr_file) status = os.system(command) if status == 0: # rename to LMR variable command = 'ncrename -O -v atm_temp,tas %s' %(lmr_file) status = os.system(command) # convert deg C to Kelvins # mv data file to temporary file command = 'mv -f %s tmp.nc' %(lmr_file) status = os.system(command) # rename "missing_value" as _FillValue (recognized by NCO) command = 'ncrename -a .missing_value,_FillValue tmp.nc tmp2.nc' status = os.system(command) # perform conversion and put results in lmr_file command = 'ncap -O -s "tas=(tas+273.15)" tmp2.nc %s' %(lmr_file) status = os.system(command) # delete temporary 
files command = 'rm -f tmp.nc tmp2.nc' status = os.system(command) # re-add variable attributes command = 'ncatted -O -a long_name,tas,c,c,"surface air temperature" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,tas,c,c,"K" %s' %(lmr_file) status = os.system(command) command = 'ncatted -a _FillValue,,m,f,%f %s' %(FillVal,lmr_file) status = os.system(command) command = 'ncatted -a missing_value,,c,f,%f %s' %(FillVal,lmr_file) status = os.system(command) # add necessary attributes to time variable command = 'ncatted -O -a calendar,time,c,c,"noleap" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,time,o,c,"year mid-point" %s' %(lmr_file) status = os.system(command) # ------------------------------------------------ # 2) extract SST from GENIE 2D file (ocn_sur_temp) # ------------------------------------------------ lmr_file = output_data_directory+'tos_sfc_O%s_cgenie_petm.nc' %(time_interval) genie_variable = 'ocn_sur_temp' # extract the variable command = 'ncks -v %s %s %s' %(genie_variable,file_2d_fields,lmr_file) status = os.system(command) if status == 0: # rename to LMR variable command = 'ncrename -O -v ocn_sur_temp,tos %s' %(lmr_file) status = os.system(command) # convert deg C to Kelvins # mv data file to temporary file command = 'mv -f %s tmp.nc' %(lmr_file) status = os.system(command) # rename "missing_value" as _FillValue (recognized by NCO) command = 'ncrename -a .missing_value,_FillValue tmp.nc tmp2.nc' status = os.system(command) # perform conversion and put results in lmr_file command = 'ncap -O -s "tos=(tos+273.15)" tmp2.nc %s' %(lmr_file) status = os.system(command) # delete temporary files command = 'rm -f tmp.nc tmp2.nc' status = os.system(command) # re-add variable attributes command = 'ncatted -O -a long_name,tos,c,c,"surface-water temp" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,tos,c,c,"K" %s' %(lmr_file) status = os.system(command) command = 'ncatted -a 
_FillValue,,m,f,%f %s' %(FillVal,lmr_file) status = os.system(command) command = 'ncatted -a missing_value,,c,f,%f %s' %(FillVal,lmr_file) status = os.system(command) # add necessary attributes to time variable command = 'ncatted -O -a calendar,time,c,c,"noleap" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,time,o,c,"year mid-point" %s' %(lmr_file) status = os.system(command) # ----------------------------------------------- # 3) extract SSS from GENIE 2D file (ocn_sur_sal) # ------------------------------------------------ lmr_file = output_data_directory+'sos_sfc_O%s_cgenie_petm.nc' %(time_interval) genie_variable = 'ocn_sur_sal' # extract the variable command = 'ncks -v %s %s %s' %(genie_variable,file_2d_fields,lmr_file) status = os.system(command) if status == 0: # rename to LMR variable command = 'ncrename -O -v ocn_sur_sal,sos %s' %(lmr_file) status = os.system(command) # rename "missing_value" as _FillValue (recognized by NCO) command = 'ncrename -a .missing_value,_FillValue %s' %(lmr_file) status = os.system(command) # re-add variable attributes command = 'ncatted -a _FillValue,,m,f,%f %s' %(FillVal,lmr_file) status = os.system(command) command = 'ncatted -a missing_value,,c,f,%f %s' %(FillVal,lmr_file) status = os.system(command) # add necessary attributes to time variable command = 'ncatted -O -a calendar,time,c,c,"noleap" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,time,o,c,"year mid-point" %s' %(lmr_file) status = os.system(command) # --------------------------------------------------------- # 4) extract sea-ice cover from GENIE 2D file (phys_seaice) # --------------------------------------------------------- lmr_file = output_data_directory+'sic_sfc_OI%s_cgenie_petm.nc' %(time_interval) genie_variable = 'phys_seaice' # extract the variable command = 'ncks -v %s %s %s' %(genie_variable,file_2d_fields,lmr_file) status = os.system(command) if status == 0: # rename to LMR variable command = 
'ncrename -O -v phys_seaice,sic %s' %(lmr_file) status = os.system(command) # rename "missing_value" as _FillValue (recognized by NCO) command = 'ncrename -a .missing_value,_FillValue %s' %(lmr_file) status = os.system(command) # re-add variable attributes command = 'ncatted -a _FillValue,,m,f,%f %s' %(FillVal,lmr_file) status = os.system(command) command = 'ncatted -a missing_value,,c,f,%f %s' %(FillVal,lmr_file) status = os.system(command) # add necessary attributes to time variable command = 'ncatted -O -a calendar,time,c,c,"noleap" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,time,o,c,"year mid-point" %s' %(lmr_file) status = os.system(command) # modify units attribute to sea-ice cover command = 'ncatted -O -a units,sic,o,c,"percent" %s' %(lmr_file) status = os.system(command) # ---------------------------------------------------------------- # 5) extract sea-ice thickness from GENIE 2D file (phys_seaice_th) # ---------------------------------------------------------------- lmr_file = output_data_directory+'sit_sfc_OI%s_cgenie_petm.nc' %(time_interval) genie_variable = 'phys_seaice_th' # extract the variable command = 'ncks -v %s %s %s' %(genie_variable,file_2d_fields,lmr_file) status = os.system(command) if status == 0: # rename to LMR variable command = 'ncrename -O -v phys_seaice_th,sit %s' %(lmr_file) status = os.system(command) # rename "missing_value" as _FillValue (recognized by NCO) command = 'ncrename -a .missing_value,_FillValue %s' %(lmr_file) status = os.system(command) # re-add variable attributes command = 'ncatted -a _FillValue,,m,f,%f %s' %(FillVal,lmr_file) status = os.system(command) command = 'ncatted -a missing_value,,c,f,%f %s' %(FillVal,lmr_file) status = os.system(command) # add necessary attributes to time variable command = 'ncatted -O -a calendar,time,c,c,"noleap" %s' %(lmr_file) status = os.system(command) command = 'ncatted -O -a units,time,o,c,"year mid-point" %s' %(lmr_file) status = os.system(command)
37.924444
97
0.624517
1,182
8,533
4.352792
0.130288
0.066667
0.12517
0.187755
0.87036
0.838873
0.795335
0.789699
0.782313
0.782313
0
0.008487
0.171452
8,533
224
98
38.09375
0.719236
0.309153
0
0.745614
1
0.008772
0.34197
0.120796
0
0
0
0
0
1
0
false
0
0.008772
0
0.008772
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
be28c6bcd0585c61f4d6f3ff8e3447dc5d801e96
15,379
py
Python
ctpbee/record.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[ "MIT" ]
null
null
null
ctpbee/record.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[ "MIT" ]
null
null
null
ctpbee/record.py
yutiansut/ctpbee
02ceb3d4456a54b1b4f8066a2662c4b8fac1027f
[ "MIT" ]
3
2019-11-21T03:38:14.000Z
2022-02-14T08:09:11.000Z
from copy import deepcopy from datetime import datetime from ctpbee.constant import EVENT_TICK, EVENT_ORDER, EVENT_TRADE, EVENT_POSITION, EVENT_ACCOUNT, \ EVENT_CONTRACT, EVENT_BAR, EVENT_LOG, EVENT_ERROR, EVENT_SHARED from ctpbee.data_handle import generator from ctpbee.data_handle.local_position import LocalPositionManager from ctpbee.event_engine import Event class Recorder(object): """ data center """ def __init__(self, app, event_engine): """""" self.bar = {} self.ticks = {} self.orders = {} self.trades = {} self.positions = {} self.accounts = {} self.contracts = {} self.logs = {} self.errors = [] self.shared = {} self.generators = {} self.active_orders = {} self.event_engine = event_engine self.register_event() self.app = app self.position_manager = LocalPositionManager(app=self.app) @staticmethod def get_local_time(): from datetime import datetime return datetime.now().strftime('%Y-%m-%d %H:%M:%S') def register_event(self): """bind process function""" self.event_engine.register(EVENT_TICK, self.process_tick_event) self.event_engine.register(EVENT_ORDER, self.process_order_event) self.event_engine.register(EVENT_TRADE, self.process_trade_event) self.event_engine.register(EVENT_POSITION, self.process_position_event) self.event_engine.register(EVENT_ACCOUNT, self.process_account_event) self.event_engine.register(EVENT_CONTRACT, self.process_contract_event) self.event_engine.register(EVENT_BAR, self.process_bar_event) self.event_engine.register(EVENT_LOG, self.process_log_event) self.event_engine.register(EVENT_ERROR, self.process_error_event) self.event_engine.register(EVENT_SHARED, self.process_shared_event) def process_shared_event(self, event): if self.shared.get(event.data.local_symbol, None) is not None: self.shared[event.data.local_symbol].append(event.data) else: self.shared[event.data.local_symbol] = [] for value in self.app.extensions.values(): value(deepcopy(event)) def process_error_event(self, event: Event): self.errors.append({"time": 
self.get_local_time(), "data": event.data}) print(self.get_local_time() + ": ", event.data) def process_log_event(self, event: Event): self.logs[self.get_local_time()] = event.data if self.app.config.get("LOG_OUTPUT"): print(self.get_local_time() + ": ", event.data) for value in self.app.extensions.values(): value(deepcopy(event)) def process_bar_event(self, event: Event): bar = event.data local = self.bar.get(bar.local_symbol) if local is None: self.bar[bar.local_symbol] = {bar.interval: []} else: if self.bar[bar.local_symbol].get(bar.interval) is None: self.bar[bar.local_symbol] = {bar.interval: []} self.bar[bar.local_symbol][bar.interval].append(bar) for value in self.app.extensions.values(): value(deepcopy(event)) def process_tick_event(self, event: Event): """""" tick = event.data self.ticks[tick.local_symbol] = tick symbol = tick.symbol self.position_manager.update_tick(tick) # 生成datetime对象 if not tick.datetime: if '.' in tick.time: tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f') else: tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S') bm = self.generators.get(symbol, None) if bm: bm.update_tick(tick) if not bm: self.generators[symbol] = generator(self.event_engine) for value in self.app.extensions.values(): value(deepcopy(event)) def process_order_event(self, event: Event): """""" order = event.data self.orders[order.local_order_id] = order # If order is active, then update data in dict. 
if order._is_active(): self.active_orders[order.local_order_id] = order # Otherwise, pop inactive order from in dict elif order.local_order_id in self.active_orders: self.active_orders.pop(order.local_order_id) self.position_manager.update_order(order) for value in self.app.extensions.values(): value(deepcopy(event)) def process_trade_event(self, event: Event): """""" trade = event.data self.trades[trade.local_trade_id] = trade self.position_manager.update_trade(trade) for value in self.app.extensions.values(): value(deepcopy(event)) def process_position_event(self, event: Event): """""" position = event.data self.positions[position.local_position_id] = position self.position_manager.update_position(position) for value in self.app.extensions.values(): value(deepcopy(event)) def process_account_event(self, event: Event): """""" account = event.data self.accounts[account.local_account_id] = account for value in self.app.extensions.values(): value(deepcopy(event)) def process_contract_event(self, event: Event): """""" contract = event.data self.contracts[contract.local_symbol] = contract for value in self.app.extensions.values(): value(deepcopy(event)) def get_shared(self, symbol): return self.shared.get(symbol, None) def get_all_shared(self): return self.shared def get_bar(self, local_symbol): return self.bar.get(local_symbol, None) def get_all_bar(self): return self.bar def get_tick(self, local_symbol): return self.ticks.get(local_symbol, None) def get_order(self, local_order_id): return self.orders.get(local_order_id, None) def get_trade(self, local_trade_id): return self.trades.get(local_trade_id, None) def get_position(self, local_position_id): return self.positions.get(local_position_id, None) def get_account(self, local_account_id): return self.accounts.get(local_account_id, None) def get_contract(self, local_symbol): return self.contracts.get(local_symbol, None) def get_all_ticks(self): """ Get all tick data. 
""" return list(self.ticks.values()) def get_all_orders(self): """ Get all order data. """ return list(self.orders.values()) def get_all_trades(self): """ Get all trade data. """ return list(self.trades.values()) def get_all_positions(self): """ Get all position data. """ return self.position_manager.get_all_positions() def get_errors(self): return self.errors def get_new_error(self): return self.errors[-1] def get_all_accounts(self): """ Get all account data. """ return list(self.accounts.values()) def get_all_contracts(self): """ Get all contract data. """ return list(self.contracts.values()) def get_all_active_orders(self, local_symbol: str = ""): if not local_symbol: return list(self.active_orders.values()) else: active_orders = [ order for order in self.active_orders.values() if order.local_symbol == local_symbol ] return active_orders class AsyncRecorder(object): """ data center """ def __init__(self, app, event_engine): """""" self.bar = {} self.ticks = {} self.orders = {} self.trades = {} self.positions = {} self.accounts = {} self.contracts = {} self.logs = {} self.errors = [] self.shared = {} self.generators = {} self.active_orders = {} self.event_engine = event_engine self.register_event() self.app = app self.position_manager = LocalPositionManager(app=self.app) @staticmethod def get_local_time(): from datetime import datetime return datetime.now().strftime('%Y-%m-%d %H:%M:%S') def register_event(self): """bind process function""" self.event_engine.register(EVENT_TICK, self.process_tick_event) self.event_engine.register(EVENT_ORDER, self.process_order_event) self.event_engine.register(EVENT_TRADE, self.process_trade_event) self.event_engine.register(EVENT_POSITION, self.process_position_event) self.event_engine.register(EVENT_ACCOUNT, self.process_account_event) self.event_engine.register(EVENT_CONTRACT, self.process_contract_event) self.event_engine.register(EVENT_BAR, self.process_bar_event) self.event_engine.register(EVENT_LOG, self.process_log_event) 
self.event_engine.register(EVENT_ERROR, self.process_error_event) self.event_engine.register(EVENT_SHARED, self.process_shared_event) async def process_shared_event(self, event): if self.shared.get(event.data.local_symbol, None) is not None: self.shared[event.data.local_symbol].append(event.data) else: self.shared[event.data.local_symbol] = [] for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_error_event(self, event: Event): self.errors.append({"time": self.get_local_time(), "data": event.data}) print(self.get_local_time() + ": ", event.data) async def process_log_event(self, event: Event): self.logs[self.get_local_time()] = event.data if self.app.config.get("LOG_OUTPUT"): print(self.get_local_time() + ": ", event.data) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_bar_event(self, event: Event): bar = event.data local = self.bar.get(bar.local_symbol) if local is None: self.bar[bar.local_symbol] = {bar.interval: []} else: if self.bar[bar.local_symbol].get(bar.interval) is None: self.bar[bar.local_symbol] = {bar.interval: []} self.bar[bar.local_symbol][bar.interval].append(bar) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_tick_event(self, event: Event): """""" tick = event.data self.ticks[tick.local_symbol] = tick symbol = tick.symbol self.position_manager.update_tick(tick) # 生成datetime对象 if not tick.datetime: if '.' 
in tick.time: tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f') else: tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S') bm = self.generators.get(symbol, None) if bm: bm.update_tick(tick) if not bm: self.generators[symbol] = generator(self.event_engine) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_order_event(self, event: Event): """""" order = event.data self.orders[order.local_order_id] = order # If order is active, then update data in dict. if order._is_active(): self.active_orders[order.local_order_id] = order # Otherwise, pop inactive order from in dict elif order.local_order_id in self.active_orders: self.active_orders.pop(order.local_order_id) self.position_manager.update_order(order) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_trade_event(self, event: Event): """""" trade = event.data self.trades[trade.local_trade_id] = trade self.position_manager.update_trade(trade) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_position_event(self, event: Event): """""" position = event.data self.positions[position.local_position_id] = position self.position_manager.update_position(position) for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_account_event(self, event: Event): """""" account = event.data self.accounts[account.local_account_id] = account for value in self.app.extensions.values(): await value(deepcopy(event)) async def process_contract_event(self, event: Event): """""" contract = event.data self.contracts[contract.local_symbol] = contract for value in self.app.extensions.values(): await value(deepcopy(event)) def get_shared(self, symbol): return self.shared.get(symbol, None) def get_all_shared(self): return self.shared def get_bar(self, local_symbol): return self.bar.get(local_symbol, None) def get_all_bar(self): 
return self.bar def get_tick(self, local_symbol): return self.ticks.get(local_symbol, None) def get_order(self, local_order_id): return self.orders.get(local_order_id, None) def get_trade(self, local_trade_id): return self.trades.get(local_trade_id, None) def get_position(self, local_position_id): return self.positions.get(local_position_id, None) def get_account(self, local_account_id): return self.accounts.get(local_account_id, None) def get_contract(self, local_symbol): return self.contracts.get(local_symbol, None) def get_all_ticks(self): """ Get all tick data. """ return list(self.ticks.values()) def get_all_orders(self): """ Get all order data. """ return list(self.orders.values()) def get_all_trades(self): """ Get all trade data. """ return list(self.trades.values()) def get_all_positions(self): """ Get all position data. """ return self.position_manager.get_all_positions() def get_errors(self): return self.errors def get_new_error(self): return self.errors[-1] def get_all_accounts(self): """ Get all account data. """ return list(self.accounts.values()) def get_all_contracts(self): """ Get all contract data. """ return list(self.contracts.values()) def get_all_active_orders(self, local_symbol: str = ""): if not local_symbol: return list(self.active_orders.values()) else: active_orders = [ order for order in self.active_orders.values() if order.local_symbol == local_symbol ] return active_orders
33.360087
105
0.621367
1,903
15,379
4.820809
0.050447
0.045128
0.05799
0.050142
0.962612
0.962612
0.962612
0.962612
0.962612
0.962612
0
0.000177
0.266727
15,379
460
106
33.432609
0.813337
0.034202
0
0.905363
0
0
0.010383
0
0
0
0
0
0
1
0.170347
false
0
0.025237
0.07571
0.334385
0.012618
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
077ef5a456f4ef84d1fb90d05e9ac1302b4a87c9
5,722
py
Python
ElectroWeakAnalysis/ZMuMu/python/zSelection_cfi.py
SWuchterl/cmssw
769b4a7ef81796579af7d626da6039dfa0347b8e
[ "Apache-2.0" ]
6
2017-09-08T14:12:56.000Z
2022-03-09T23:57:01.000Z
ElectroWeakAnalysis/ZMuMu/python/zSelection_cfi.py
SWuchterl/cmssw
769b4a7ef81796579af7d626da6039dfa0347b8e
[ "Apache-2.0" ]
545
2017-09-19T17:10:19.000Z
2022-03-07T16:55:27.000Z
ElectroWeakAnalysis/ZMuMu/python/zSelection_cfi.py
SWuchterl/cmssw
769b4a7ef81796579af7d626da6039dfa0347b8e
[ "Apache-2.0" ]
14
2017-10-04T09:47:21.000Z
2019-10-23T18:04:45.000Z
import FWCore.ParameterSet.Config as cms zSelectionLoose = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 15 & daughter(1).pt > 15 & abs(daughter(0).eta)<2.4 & abs(daughter(1).eta)<2.4 & mass > 0"), isoCut = cms.double(1000.), ptThreshold = cms.untracked.double(1.5), etEcalThreshold = cms.untracked.double(0.2), etHcalThreshold = cms.untracked.double(0.5), deltaRVetoTrk = cms.untracked.double(0.015), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.25), deltaRHcal = cms.untracked.double(0.25), alpha = cms.untracked.double(0.), beta = cms.untracked.double(-0.75), relativeIsolation = cms.bool(False) # For standard isolation (I_Tkr<3GeV) choose this configuration: # isoCut = cms.double(3.), # ptThreshold = cms.untracked.double(1.5), # etEcalThreshold = cms.untracked.double(0.2), # etHcalThreshold = cms.untracked.double(0.5), # deltaRVetoTrk = cms.untracked.double(0.015), # deltaRTrk = cms.untracked.double(0.3), # deltaREcal = cms.untracked.double(0.25), # deltaRHcal = cms.untracked.double(0.25), # alpha = cms.untracked.double(0.), # beta = cms.untracked.double(-0.75), # relativeIsolation = cms.bool(False) ) ##### I = alpha /2 (( 1 + beta) HCal + (1 - beta) Ecal ) + (1 - alpha)Trk ####### combined isolation #zSelection = cms.PSet( # cut = cms.string("charge = 0 & daughter(0).pt > 20. & daughter(1).pt > 20. & abs(daughter(0).eta)<2.1 & abs(daughter(1).eta)<2.1 & mass > 0"), # isoCut = cms.double(.45), ### with alpha = 2/3 and beta =0, so 0.45 is equivalent to 0.15...... 
# ptThreshold = cms.untracked.double(0.), # etEcalThreshold = cms.untracked.double(0.), # etHcalThreshold = cms.untracked.double(0.), # deltaRVetoTrk = cms.untracked.double(0.01), # deltaRTrk = cms.untracked.double(0.3), # deltaREcal = cms.untracked.double(0.3), # deltaRHcal = cms.untracked.double(0.3), # alpha = cms.untracked.double(0.666667), # beta = cms.untracked.double(0.0), # relativeIsolation = cms.bool(True) # ) #### tracker isolation zSelection = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 20. & daughter(1).pt > 20. & abs(daughter(0).eta)<2.1 & abs(daughter(1).eta)<2.1 & mass > 0"), isoCut = cms.double(3.00), ptThreshold = cms.untracked.double(0.), etEcalThreshold = cms.untracked.double(0.), etHcalThreshold = cms.untracked.double(0.), deltaRVetoTrk = cms.untracked.double(0.01), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.3), deltaRHcal = cms.untracked.double(0.3), alpha = cms.untracked.double(0.), beta = cms.untracked.double(0.0), relativeIsolation = cms.bool(False) ) ### region A: |eta|<2.1, region B: 2.1< |eta| <2.4 zSelectionABLoose = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 15 & daughter(1).pt > 15 & ( (abs(daughter(0).eta)<2.1 & 2.1< abs(daughter(1).eta)<2.4 ) || (abs(daughter(1).eta)<2.1 & 2.1< abs(daughter(0).eta)<2.4 ) ) & mass > 0"), isoCut = cms.double(1000.), ptThreshold = cms.untracked.double(1.5), etEcalThreshold = cms.untracked.double(0.2), etHcalThreshold = cms.untracked.double(0.5), deltaRVetoTrk = cms.untracked.double(0.015), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.25), deltaRHcal = cms.untracked.double(0.25), alpha = cms.untracked.double(0.), beta = cms.untracked.double(-0.75), relativeIsolation = cms.bool(False) ) zSelectionAB = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 20. & daughter(1).pt > 20. 
& ( (abs(daughter(0).eta)<2.1 & 2.1< abs(daughter(1).eta)<2.4 ) || (abs(daughter(1).eta)<2.1 & 2.1< abs(daughter(0).eta)<2.4 ) ) & mass > 0"), isoCut = cms.double(1000.), ptThreshold = cms.untracked.double(1.5), etEcalThreshold = cms.untracked.double(0.2), etHcalThreshold = cms.untracked.double(0.5), deltaRVetoTrk = cms.untracked.double(0.015), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.25), deltaRHcal = cms.untracked.double(0.25), alpha = cms.untracked.double(0.), beta = cms.untracked.double(-0.75), relativeIsolation = cms.bool(False) ) zSelectionBBLoose = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 15 & daughter(1).pt > 15 & ( 2.1< abs(daughter(0).eta)<2.4 & 2.1< abs(daughter(1).eta)<2.4 ) & mass > 0"), isoCut = cms.double(1000.), ptThreshold = cms.untracked.double(1.5), etEcalThreshold = cms.untracked.double(0.2), etHcalThreshold = cms.untracked.double(0.5), deltaRVetoTrk = cms.untracked.double(0.015), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.25), deltaRHcal = cms.untracked.double(0.25), alpha = cms.untracked.double(0.), beta = cms.untracked.double(-0.75), relativeIsolation = cms.bool(False) ) zSelectionBB = cms.PSet( cut = cms.string("charge = 0 & daughter(0).pt > 20 & daughter(1).pt > 20 & ( 2.1< abs(daughter(0).eta)<2.4 & 2.1< abs(daughter(1).eta)<2.4 ) & mass > 0"), isoCut = cms.double(1000.), ptThreshold = cms.untracked.double(1.5), etEcalThreshold = cms.untracked.double(0.2), etHcalThreshold = cms.untracked.double(0.5), deltaRVetoTrk = cms.untracked.double(0.015), deltaRTrk = cms.untracked.double(0.3), deltaREcal = cms.untracked.double(0.25), deltaRHcal = cms.untracked.double(0.25), alpha = cms.untracked.double(0.), beta = cms.untracked.double(-0.75), relativeIsolation = cms.bool(False) ) goodZTight = cms.EDFilter( "ZToMuMuIsolatedIDSelector", zSelection, src = cms.InputTag("goodZ"), filter = cms.bool(True) )
39.462069
222
0.647326
805
5,722
4.6
0.103106
0.233324
0.349987
0.338644
0.891169
0.891169
0.891169
0.891169
0.890899
0.871186
0
0.071444
0.168298
5,722
144
223
39.736111
0.706661
0.237854
0
0.637363
0
0.065934
0.215727
0.095801
0
0
0
0
0
1
0
false
0
0.010989
0
0.010989
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
07861f3ea99112ced4f3d202f67e445824038884
15,099
py
Python
gluoncv/data/pascal_voc/detection.py
JiangongWang/mean-teacher-cross-domain-detection
c52b8b2e22e8ff30ead1bef82409d41f52883ccd
[ "Apache-2.0" ]
36
2019-12-25T04:59:49.000Z
2022-03-17T07:24:49.000Z
gluoncv/data/pascal_voc/detection.py
JiangongWang/mean-teacher-cross-domain-detection
c52b8b2e22e8ff30ead1bef82409d41f52883ccd
[ "Apache-2.0" ]
1
2020-02-25T05:56:19.000Z
2020-05-15T17:03:59.000Z
gluoncv/data/pascal_voc/detection.py
JiangongWang/mean-teacher-cross-domain-detection
c52b8b2e22e8ff30ead1bef82409d41f52883ccd
[ "Apache-2.0" ]
9
2019-12-25T05:00:33.000Z
2021-10-01T14:23:51.000Z
"""Pascal VOC object detection dataset.""" from __future__ import absolute_import from __future__ import division import os import logging import numpy as np try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET import mxnet as mx from ..base import VisionDataset class VOCDetection(VisionDataset): """Pascal VOC detection Dataset. Parameters ---------- root : str, default '~/mxnet/datasets/voc' Path to folder storing the dataset. splits : list of tuples, default ((2007, 'trainval'), (2012, 'trainval')) List of combinations of (year, name) For years, candidates can be: 2007, 2012. For names, candidates can be: 'train', 'val', 'trainval', 'test'. transform : callable, defaut None A function that takes data and label and transforms them. Refer to :doc:`./transforms` for examples. A transform function for object detection should take label into consideration, because any geometric modification will require label to be modified. index_map : dict, default None In default, the 20 classes are mapped into indices from 0 to 19. We can customize it by providing a str to int dict specifying how to map class names to indicies. Use by advanced users only, when you want to swap the orders of class labels. preload_label : bool, default True If True, then parse and load all labels into memory during initialization. It often accelerate speed but require more memory usage. Typical preloaded labels took tens of MB. You only need to disable it when your dataset is extreamly large. 
""" CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') def __init__(self, root=os.path.join('~', '.mxnet', 'datasets', 'voc'), splits=((2007, 'trainval'), (2012, 'trainval')), transform=None, index_map=None, preload_label=True): super(VOCDetection, self).__init__(root) self._im_shapes = {} self._root = os.path.expanduser(root) self._transform = transform self._splits = splits self._items = self._load_items(splits) self._anno_path = os.path.join('{}', 'Annotations', '{}.xml') self._image_path = os.path.join('{}', 'JPEGImages', '{}.jpg') self.index_map = index_map or dict(zip(self.classes, range(self.num_class))) self._label_cache = self._preload_labels() if preload_label else None def __str__(self): detail = ','.join([str(s[0]) + s[1] for s in self._splits]) return self.__class__.__name__ + '(' + detail + ')' @property def classes(self): """Category names.""" return type(self).CLASSES def __len__(self): return len(self._items) def __getitem__(self, idx): img_id = self._items[idx] img_path = self._image_path.format(*img_id) label = self._label_cache[idx] if self._label_cache else self._load_label(idx) img = mx.image.imread(img_path, 1) if self._transform is not None: return self._transform(img, label) return img, label def _load_items(self, splits): """Load individual image indices from splits.""" ids = [] for year, name in splits: root = os.path.join(self._root, 'VOC' + str(year)) lf = os.path.join(root, 'ImageSets', 'Main', name + '.txt') with open(lf, 'r') as f: ids += [(root, line.strip()) for line in f.readlines()] return ids def _load_label(self, idx): """Parse xml file and return labels.""" img_id = self._items[idx] anno_path = self._anno_path.format(*img_id) root = ET.parse(anno_path).getroot() size = root.find('size') width = float(size.find('width').text) height = float(size.find('height').text) if idx 
not in self._im_shapes: # store the shapes for later usage self._im_shapes[idx] = (width, height) label = [] for obj in root.iter('object'): difficult = int(obj.find('difficult').text) cls_name = obj.find('name').text.strip().lower() if cls_name not in self.classes: continue cls_id = self.index_map[cls_name] xml_box = obj.find('bndbox') xmin = (float(xml_box.find('xmin').text) - 1) ymin = (float(xml_box.find('ymin').text) - 1) xmax = (float(xml_box.find('xmax').text) - 1) ymax = (float(xml_box.find('ymax').text) - 1) try: self._validate_label(xmin, ymin, xmax, ymax, width, height) except AssertionError as e: raise RuntimeError("Invalid label at {}, {}".format(anno_path, e)) label.append([xmin, ymin, xmax, ymax, cls_id, difficult]) return np.array(label) def _validate_label(self, xmin, ymin, xmax, ymax, width, height): """Validate labels.""" assert 0 <= xmin < width, "xmin must in [0, {}), given {}".format(width, xmin) assert 0 <= ymin < height, "ymin must in [0, {}), given {}".format(height, ymin) assert xmin < xmax <= width, "xmax must in (xmin, {}], given {}".format(width, xmax) assert ymin < ymax <= height, "ymax must in (ymin, {}], given {}".format(height, ymax) def _preload_labels(self): """Preload all labels into memory.""" logging.debug("Preloading %s labels into memory...", str(self)) return [self._load_label(idx) for idx in range(len(self))] class CityScapeDetection(VisionDataset): """Pascal VOC detection Dataset. Parameters ---------- root : str, default '~/mxnet/datasets/voc' Path to folder storing the dataset. splits : list of tuples, default ((2007, 'trainval'), (2012, 'trainval')) List of combinations of (year, name) For years, candidates can be: 2007, 2012. For names, candidates can be: 'train', 'val', 'trainval', 'test'. transform : callable, defaut None A function that takes data and label and transforms them. Refer to :doc:`./transforms` for examples. 
A transform function for object detection should take label into consideration, because any geometric modification will require label to be modified. index_map : dict, default None In default, the 20 classes are mapped into indices from 0 to 19. We can customize it by providing a str to int dict specifying how to map class names to indicies. Use by advanced users only, when you want to swap the orders of class labels. preload_label : bool, default True If True, then parse and load all labels into memory during initialization. It often accelerate speed but require more memory usage. Typical preloaded labels took tens of MB. You only need to disable it when your dataset is extreamly large. """ CLASSES = ["person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"] def __init__(self, root=os.path.join('~', '.mxnet', 'datasets', 'voc'), splits="", transform=None, index_map=None, preload_label=True, min_dataset_size=-1): super(CityScapeDetection, self).__init__(root) self._im_shapes = {} self.min_dataset_size = min_dataset_size self._root = os.path.expanduser(root) self._transform = transform self._splits = splits self._items = self._load_items(splits) # self._image_path = os.path.join('{}') self.index_map = index_map or dict(zip(self.classes, range(self.num_class))) self._label_cache = self._preload_labels() if preload_label else None def __str__(self): detail = self._splits return self.__class__.__name__ + '(' + detail + ')' @property def classes(self): """Category names.""" return type(self).CLASSES def __len__(self): return len(self._items) def __getitem__(self, idx): img_id = self._items[idx] # img_path = self._image_path.format(*img_id) img_path = img_id[0] label = self._label_cache[idx] if self._label_cache else self._load_label(idx) img = mx.image.imread(img_path, 1) if self._transform is not None: return self._transform(img, label) return img, label def _load_items(self, splits): """Load individual image indices from splits.""" ids = [] with 
open(os.path.join(self._root, splits), 'r') as f: for line in f.readlines(): line = line.strip().split(" ") path = os.path.join(self._root, line[0]) others = line[1:] ids.append([path] + others) if self.min_dataset_size > 0: print("{}: padding from : {} to {}".format(self._splits, len(ids), self.min_dataset_size)) while (len(ids)) < self.min_dataset_size: ids = ids + ids ids = ids[:self.min_dataset_size] return ids def _load_label(self, idx): """Parse xml file and return labels.""" img_id = self._items[idx] annotation_data = img_id[1:] annotation_data = np.array([float(k) for k in annotation_data]) annotation_data = np.reshape(annotation_data, newshape=(-1, 5)) annotation_data = np.concatenate((annotation_data, np.zeros(shape=(annotation_data.shape[0], 1))), axis=1) return np.array(annotation_data) def _validate_label(self, xmin, ymin, xmax, ymax, width, height): """Validate labels.""" assert 0 <= xmin < width, "xmin must in [0, {}), given {}".format(width, xmin) assert 0 <= ymin < height, "ymin must in [0, {}), given {}".format(height, ymin) assert xmin < xmax <= width, "xmax must in (xmin, {}], given {}".format(width, xmax) assert ymin < ymax <= height, "ymax must in (ymin, {}], given {}".format(height, ymax) def _preload_labels(self): """Preload all labels into memory.""" logging.debug("Preloading %s labels into memory...", str(self)) return [self._load_label(idx) for idx in range(len(self))] class SIM10kDetection(VisionDataset): """Pascal VOC detection Dataset. Parameters ---------- root : str, default '~/mxnet/datasets/voc' Path to folder storing the dataset. splits : list of tuples, default ((2007, 'trainval'), (2012, 'trainval')) List of combinations of (year, name) For years, candidates can be: 2007, 2012. For names, candidates can be: 'train', 'val', 'trainval', 'test'. transform : callable, defaut None A function that takes data and label and transforms them. Refer to :doc:`./transforms` for examples. 
A transform function for object detection should take label into consideration, because any geometric modification will require label to be modified. index_map : dict, default None In default, the 20 classes are mapped into indices from 0 to 19. We can customize it by providing a str to int dict specifying how to map class names to indicies. Use by advanced users only, when you want to swap the orders of class labels. preload_label : bool, default True If True, then parse and load all labels into memory during initialization. It often accelerate speed but require more memory usage. Typical preloaded labels took tens of MB. You only need to disable it when your dataset is extreamly large. """ CLASSES = ["car", ] def __init__(self, root=os.path.join('~', '.mxnet', 'datasets', 'voc'), splits="", transform=None, index_map=None, preload_label=True, min_dataset_size=-1): super(SIM10kDetection, self).__init__(root) self._im_shapes = {} self.min_dataset_size = min_dataset_size self._root = os.path.expanduser(root) self._transform = transform self._splits = splits self._items = self._load_items(splits) # self._image_path = os.path.join('{}') self.index_map = index_map or dict(zip(self.classes, range(self.num_class))) self._label_cache = self._preload_labels() if preload_label else None def __str__(self): detail = self._splits return self.__class__.__name__ + '(' + detail + ')' @property def classes(self): """Category names.""" return type(self).CLASSES def __len__(self): return len(self._items) def __getitem__(self, idx): img_id = self._items[idx] # img_path = self._image_path.format(*img_id) img_path = img_id[0] label = self._label_cache[idx] if self._label_cache else self._load_label(idx) img = mx.image.imread(img_path, 1) if self._transform is not None: return self._transform(img, label) return img, label def _load_items(self, splits): """Load individual image indices from splits.""" ids = [] with open(os.path.join(self._root, splits), 'r') as f: for line in 
f.readlines(): line = line.strip().split(" ") path = os.path.join(self._root, line[0]) others = line[1:] ids.append([path] + others) if self.min_dataset_size > 0: print("{}: padding from : {} to {}".format(self._splits, len(ids), self.min_dataset_size)) while (len(ids)) < self.min_dataset_size: ids = ids + ids ids = ids[:self.min_dataset_size] return ids def _load_label(self, idx): """Parse xml file and return labels.""" img_id = self._items[idx] annotation_data = img_id[1:] annotation_data = np.array([float(k) for k in annotation_data]) annotation_data = np.reshape(annotation_data, newshape=(-1, 5)) annotation_data = np.concatenate((annotation_data, np.zeros(shape=(annotation_data.shape[0], 1))), axis=1) return np.array(annotation_data) def _validate_label(self, xmin, ymin, xmax, ymax, width, height): """Validate labels.""" assert 0 <= xmin < width, "xmin must in [0, {}), given {}".format(width, xmin) assert 0 <= ymin < height, "ymin must in [0, {}), given {}".format(height, ymin) assert xmin < xmax <= width, "xmax must in (xmin, {}], given {}".format(width, xmax) assert ymin < ymax <= height, "ymax must in (ymin, {}], given {}".format(height, ymax) def _preload_labels(self): """Preload all labels into memory.""" logging.debug("Preloading %s labels into memory...", str(self)) return [self._load_label(idx) for idx in range(len(self))]
43.892442
114
0.615074
1,952
15,099
4.588627
0.13627
0.028134
0.021882
0.020096
0.852629
0.848498
0.842916
0.840237
0.835659
0.835659
0
0.010608
0.263262
15,099
343
115
44.020408
0.794588
0.291675
0
0.726829
0
0
0.088995
0
0
0
0
0
0.063415
1
0.131707
false
0
0.04878
0.014634
0.326829
0.009756
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
078e788beb4e94e509d9cf9c9979a01ac4f4fffa
359
py
Python
pyaedt/edb_core/__init__.py
beliaev-maksim/pyaedt
c549de1d0c80f3598afc5475817a332bb6d6df57
[ "MIT" ]
12
2021-07-01T06:35:12.000Z
2021-09-22T15:53:07.000Z
pyaedt/edb_core/__init__.py
beliaev-maksim/pyaedt
c549de1d0c80f3598afc5475817a332bb6d6df57
[ "MIT" ]
111
2021-07-01T16:02:36.000Z
2021-09-29T12:36:44.000Z
pyaedt/edb_core/__init__.py
beliaev-maksim/pyaedt
c549de1d0c80f3598afc5475817a332bb6d6df57
[ "MIT" ]
5
2021-07-09T14:24:59.000Z
2021-09-07T12:42:03.000Z
from __future__ import absolute_import from pyaedt.edb_core.components import Components from pyaedt.edb_core.hfss import EdbHfss from pyaedt.edb_core.nets import EdbNets from pyaedt.edb_core.padstack import EdbPadstacks from pyaedt.edb_core.siwave import EdbSiwave from pyaedt.edb_core.stackup import EdbStackup from pyaedt.edb_core.layout import EdbLayout
35.9
49
0.869081
54
359
5.555556
0.37037
0.233333
0.303333
0.396667
0
0
0
0
0
0
0
0
0.091922
359
9
50
39.888889
0.920245
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
07b6c66d0922c5b856f358ab41ac7c95ea96458f
74,170
py
Python
azure/storage/blob/pageblobservice.py
RobertoPrevato/azure-storage-python
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
[ "Apache-2.0" ]
5
2018-03-21T12:59:53.000Z
2020-11-30T12:24:18.000Z
azure/storage/blob/pageblobservice.py
RobertoPrevato/azure-storage-python
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
[ "Apache-2.0" ]
null
null
null
azure/storage/blob/pageblobservice.py
RobertoPrevato/azure-storage-python
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
[ "Apache-2.0" ]
3
2018-10-09T18:35:19.000Z
2019-03-13T09:43:02.000Z
# ------------------------------------------------------------------------- # Copyright (c) Microsoft. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- import sys from os import path from azure.storage.common._common_conversion import ( _int_to_str, _to_str, _datetime_to_utc_string, _get_content_md5, ) from azure.storage.common._constants import ( SERVICE_HOST_BASE, DEFAULT_PROTOCOL, ) from azure.storage.common._error import ( _validate_not_none, _validate_type_bytes, _validate_encryption_required, _validate_encryption_unsupported, _ERROR_VALUE_NEGATIVE, ) from azure.storage.common._http import HTTPRequest from azure.storage.common._serialization import ( _get_data_bytes_only, _add_metadata_headers, ) from ._deserialization import ( _convert_xml_to_page_ranges, _parse_page_properties, _parse_base_properties, ) from ._encryption import _generate_blob_encryption_data from ._error import ( _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, ) from ._serialization import ( _get_path, _validate_and_format_range_headers, ) from ._upload_chunking import ( _PageBlobChunkUploader, _upload_blob_chunks, ) from .baseblobservice import BaseBlobService from .models import ( _BlobTypes, ResourceProperties) from io import BytesIO # Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT _PAGE_ALIGNMENT = 512 class PageBlobService(BaseBlobService): ''' Page blobs are a collection of 512-byte pages optimized for random 
read and write operations. To create a page blob, you initialize the page blob and specify the maximum size the page blob will grow. To add or update the contents of a page blob, you write a page or pages by specifying an offset and a range that align to 512-byte page boundaries. A write to a page blob can overwrite just one page, some pages, or up to 4 MB of the page blob. Writes to page blobs happen in-place and are immediately committed to the blob. The maximum size for a page blob is 1 TB. :ivar int MAX_PAGE_SIZE: The size of the pages put by create_blob_from_* methods. Smaller pages may be put if there is less data provided. The maximum page size the service supports is 4MB. ''' MAX_PAGE_SIZE = 4 * 1024 * 1024 def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): ''' :param str account_name: The storage account name. This is used to authenticate requests signed with an account key and to construct the storage endpoint. It is required unless a connection string is given, or if a custom domain is used with anonymous authentication. :param str account_key: The storage account key. This is used for shared key authentication. If neither account key or sas token is specified, anonymous access will be used. :param str sas_token: A shared access signature token to use to authenticate requests instead of the account key. If account key and sas token are both specified, account key will be used to sign. If neither are specified, anonymous access will be used. :param bool is_emulated: Whether to use the emulator. Defaults to False. If specified, will override all other parameters besides connection string and request session. :param str protocol: The protocol to use for requests. Defaults to https. :param str endpoint_suffix: The host base component of the url, minus the account name. 
Defaults to Azure (core.windows.net). Override this to use the China cloud (core.chinacloudapi.cn). :param str custom_domain: The custom domain to use. This can be set in the Azure Portal. For example, 'www.mydomain.com'. :param requests.Session request_session: The session object to use for http requests. :param str connection_string: If specified, this will override all other parameters besides request session. See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format. :param int socket_timeout: If specified, this will override the default socket timeout. The timeout specified is in seconds. See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. ''' self.blob_type = _BlobTypes.PageBlob super(PageBlobService, self).__init__( account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, custom_domain, request_session, connection_string, socket_timeout) async def create_blob( self, container_name, blob_name, content_length, content_settings=None, sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): ''' Creates a new Page Blob. See create_blob_from_* for high level functions that handle the creation and upload of large blobs with automatic chunking and progress notifications. :param str container_name: Name of existing container. :param str blob_name: Name of blob to create or update. :param int content_length: Required. This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. :param ~azure.storage.blob.models.ContentSettings content_settings: ContentSettings object used to set properties on the blob. :param int sequence_number: The sequence number is a user-controlled value that you can use to track requests. 
The value of the sequence number must be between 0 and 2^63 - 1.The default value is 0. :param metadata: Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. :param PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. 
:return: ETag and last modified properties for the new Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) return await self._create_blob( container_name, blob_name, content_length, content_settings=content_settings, sequence_number=sequence_number, metadata=metadata, lease_id=lease_id, premium_page_blob_tier=premium_page_blob_tier, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, if_match=if_match, if_none_match=if_none_match, timeout=timeout ) async def incremental_copy_blob(self, container_name, blob_name, copy_source, metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None, destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, source_lease_id=None, timeout=None): ''' Copies an incremental copy of a blob asynchronously. This operation returns a copy operation properties object, including a copy ID you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. The source blob for an incremental copy operation must be a page blob. Call get_blob_properties on the destination blob to check the status of the copy operation. The final blob will be committed when the copy completes. :param str container_name: Name of the destination container. The container must exist. :param str blob_name: Name of the destination blob. If the destination blob exists, it will be overwritten. Otherwise, it will be created. :param str copy_source: A URL of up to 2 KB in length that specifies an Azure page blob. The value should be URL-encoded as it would appear in a request URI. The copy source must be a snapshot and include a valid SAS token or be public. Example: https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken :param metadata: Name-value pairs associated with the blob as metadata. 
If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. :type metadata: dict(str, str). :param datetime destination_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the destination blob has been modified since the specified date/time. If the destination blob has not been modified, the Blob service returns status code 412 (Precondition Failed). :param datetime destination_if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the destination blob has not been modified since the specified ate/time. If the destination blob has been modified, the Blob service returns status code 412 (Precondition Failed). :param ETag destination_if_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to copy the blob only if the specified ETag value matches the ETag value for an existing destination blob. If the ETag for the destination blob does not match the ETag specified for If-Match, the Blob service returns status code 412 (Precondition Failed). :param ETag destination_if_none_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to copy the blob only if the specified ETag value does not match the ETag value for the destination blob. 
Specify the wildcard character (*) to perform the operation only if the destination blob does not exist. If the specified condition isn't met, the Blob service returns status code 412 (Precondition Failed). :param str destination_lease_id: The lease ID specified for this header must match the lease ID of the destination blob. If the request does not include the lease ID or it is not valid, the operation fails with status code 412 (Precondition Failed). :param str source_lease_id: Specify this to perform the Copy Blob operation only if the lease ID given matches the active lease ID of the source blob. :param int timeout: The timeout parameter is expressed in seconds. :return: Copy operation properties such as status, source, and ID. :rtype: :class:`~azure.storage.blob.models.CopyProperties` ''' return await self._copy_blob(container_name, blob_name, copy_source, metadata, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None, destination_if_modified_since=destination_if_modified_since, destination_if_unmodified_since=destination_if_unmodified_since, destination_if_match=destination_if_match, destination_if_none_match=destination_if_none_match, destination_lease_id=destination_lease_id, source_lease_id=source_lease_id, timeout=timeout, incremental_copy=True) async def update_page( self, container_name, blob_name, page, start_range, end_range, validate_content=False, lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None, if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' Updates a range of pages. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param bytes page: Content of the page. :param int start_range: Start of byte range to use for writing to a section of the blob. 
Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. :param int end_range: End of byte range to use for writing to a section of the blob. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. :param bool validate_content: If true, calculates an MD5 hash of the page content. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the blob. :param str lease_id: Required if the blob has an active lease. :param int if_sequence_number_lte: If the blob's sequence number is less than or equal to the specified value, the request proceeds; otherwise it fails. :param int if_sequence_number_lt: If the blob's sequence number is less than the specified value, the request proceeds; otherwise it fails. :param int if_sequence_number_eq: If the blob's sequence number is equal to the specified value, the request proceeds; otherwise it fails. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. 
Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to write the page only if the blob's ETag value matches the value specified. If the values do not match, the Blob service fails. :param str if_none_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to write the page only if the blob's ETag value does not match the value specified. If the values are identical, the Blob service fails. :param int timeout: The timeout parameter is expressed in seconds. :return: ETag and last modified properties for the updated Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) return await self._update_page( container_name, blob_name, page, start_range, end_range, validate_content=validate_content, lease_id=lease_id, if_sequence_number_lte=if_sequence_number_lte, if_sequence_number_lt=if_sequence_number_lt, if_sequence_number_eq=if_sequence_number_eq, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, if_match=if_match, if_none_match=if_none_match, timeout=timeout ) async def clear_page( self, container_name, blob_name, start_range, end_range, lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None, if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' Clears a range of pages. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param int start_range: Start of byte range to use for writing to a section of the blob. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. 
Examples of valid byte ranges are 0-511, 512-1023, etc. :param int end_range: End of byte range to use for writing to a section of the blob. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. :param str lease_id: Required if the blob has an active lease. :param int if_sequence_number_lte: If the blob's sequence number is less than or equal to the specified value, the request proceeds; otherwise it fails. :param int if_sequence_number_lt: If the blob's sequence number is less than the specified value, the request proceeds; otherwise it fails. :param int if_sequence_number_eq: If the blob's sequence number is equal to the specified value, the request proceeds; otherwise it fails. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to write the page only if the blob's ETag value matches the value specified. If the values do not match, the Blob service fails. :param str if_none_match: An ETag value, or the wildcard character (*). 
Specify an ETag value for this conditional header to write the page only if the blob's ETag value does not match the value specified. If the values are identical, the Blob service fails. :param int timeout: The timeout parameter is expressed in seconds. :return: ETag and last modified properties for the updated Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'page', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-page-write': 'clear', 'x-ms-lease-id': _to_str(lease_id), 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match) } _validate_and_format_range_headers( request, start_range, end_range, align_to_page=True) return await self._perform_request(request, _parse_page_properties) async def get_page_ranges( self, container_name, blob_name, snapshot=None, start_range=None, end_range=None, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' Returns the list of valid page ranges for a Page Blob or snapshot of a page blob. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param str snapshot: The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve information from. :param int start_range: Start of byte range to use for getting valid page ranges. 
If no end_range is given, all bytes after the start_range will be searched. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-, etc. :param int end_range: End of byte range to use for getting valid page ranges. If end_range is given, start_range must be provided. This range will return valid page ranges for from the offset start up to offset end. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-, etc. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. 
:param int timeout: The timeout parameter is expressed in seconds. :return: A list of valid Page Ranges for the Page Blob. :rtype: list(:class:`~azure.storage.blob.models.PageRange`) ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations(secondary=True) request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'pagelist', 'snapshot': _to_str(snapshot), 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-lease-id': _to_str(lease_id), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match), } if start_range is not None: _validate_and_format_range_headers( request, start_range, end_range, start_range_required=False, end_range_required=False, align_to_page=True) return await self._perform_request(request, _convert_xml_to_page_ranges) async def get_page_ranges_diff( self, container_name, blob_name, previous_snapshot, snapshot=None, start_range=None, end_range=None, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' The response will include only the pages that are different between either a recent snapshot or the current blob and a previous snapshot, including pages that were cleared. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param str previous_snapshot: The snapshot parameter is an opaque DateTime value that specifies a previous blob snapshot to be compared against a more recent snapshot or the current blob. :param str snapshot: The snapshot parameter is an opaque DateTime value that specifies a more recent blob snapshot to be compared against a previous snapshot (previous_snapshot). 
:param int start_range: Start of byte range to use for getting different page ranges. If no end_range is given, all bytes after the start_range will be searched. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-, etc. :param int end_range: End of byte range to use for getting different page ranges. If end_range is given, start_range must be provided. This range will return valid page ranges for from the offset start up to offset end. Pages must be aligned with 512-byte boundaries, the start offset must be a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid byte ranges are 0-511, 512-, etc. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. 
Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. :return: A list of different Page Ranges for the Page Blob. :rtype: list(:class:`~azure.storage.blob.models.PageRange`) ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('previous_snapshot', previous_snapshot) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations(secondary=True) request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'pagelist', 'snapshot': _to_str(snapshot), 'prevsnapshot': _to_str(previous_snapshot), 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-lease-id': _to_str(lease_id), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match), } if start_range is not None: _validate_and_format_range_headers( request, start_range, end_range, start_range_required=False, end_range_required=False, align_to_page=True) return await self._perform_request(request, _convert_xml_to_page_ranges) async def set_sequence_number( self, container_name, blob_name, sequence_number_action, sequence_number=None, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' Sets the blob sequence number. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param str sequence_number_action: This property indicates how the service should modify the blob's sequence number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information. :param str sequence_number: This property sets the blob's sequence number. 
The sequence number is a user-controlled property that you can use to track requests and manage concurrency issues. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. 
:return: ETag and last modified properties for the updated Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('sequence_number_action', sequence_number_action) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'properties', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-blob-sequence-number': _to_str(sequence_number), 'x-ms-sequence-number-action': _to_str(sequence_number_action), 'x-ms-lease-id': _to_str(lease_id), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match), } return await self._perform_request(request, _parse_page_properties) async def resize_blob( self, container_name, blob_name, content_length, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' Resizes a page blob to the specified size. If the specified value is less than the current size of the blob, then all pages above the specified value are cleared. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param int content_length: Size to resize blob to. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. 
Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. :return: ETag and last modified properties for the updated Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('content_length', content_length) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'properties', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-blob-content-length': _to_str(content_length), 'x-ms-lease-id': _to_str(lease_id), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match), } return await self._perform_request(request, _parse_page_properties) # ----Convenience APIs----------------------------------------------------- async def create_blob_from_path( self, container_name, blob_name, file_path, content_settings=None, metadata=None, validate_content=False, 
progress_callback=None, max_connections=2, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): ''' Creates a new blob from a file path, or updates the content of an existing blob, with automatic chunking and progress notifications. :param str container_name: Name of existing container. :param str blob_name: Name of blob to create or update. :param str file_path: Path of the file to upload as the blob content. :param ~azure.storage.blob.models.ContentSettings content_settings: ContentSettings object used to set blob properties. :param metadata: Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) :param bool validate_content: If true, calculates an MD5 hash for each page of the blob. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the blob. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the blob, or None if the total size is unknown. :type progress_callback: func(current, total) :param int max_connections: Maximum number of parallel connections to use. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. 
If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :param premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. 
:return: ETag and last modified properties for the Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('file_path', file_path) count = path.getsize(file_path) with open(file_path, 'rb') as stream: return await self.create_blob_from_stream( container_name=container_name, blob_name=blob_name, stream=stream, count=count, content_settings=content_settings, metadata=metadata, validate_content=validate_content, progress_callback=progress_callback, max_connections=max_connections, lease_id=lease_id, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, if_match=if_match, if_none_match=if_none_match, timeout=timeout, premium_page_blob_tier=premium_page_blob_tier) async def create_blob_from_stream( self, container_name, blob_name, stream, count, content_settings=None, metadata=None, validate_content=False, progress_callback=None, max_connections=2, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): ''' Creates a new blob from a file/stream, or updates the content of an existing blob, with automatic chunking and progress notifications. :param str container_name: Name of existing container. :param str blob_name: Name of blob to create or update. :param io.IOBase stream: Opened file/stream to upload as the blob content. :param int count: Number of bytes to read from the stream. This is required, a page blob cannot be created if the count is unknown. :param ~azure.storage.blob.models.ContentSettings content_settings: ContentSettings object used to set the blob properties. :param metadata: Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) :param bool validate_content: If true, calculates an MD5 hash for each page of the blob. 
The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the blob. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the blob, or None if the total size is unknown. :type progress_callback: func(current, total) :param int max_connections: Maximum number of parallel connections to use. Note that parallel upload requires the stream to be seekable. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. 
:param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :param premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. :return: ETag and last modified properties for the Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('stream', stream) _validate_not_none('count', count) _validate_encryption_required(self.require_encryption, self.key_encryption_key) if count < 0: raise ValueError(_ERROR_VALUE_NEGATIVE.format('count')) if count % _PAGE_ALIGNMENT != 0: raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) cek, iv, encryption_data = None, None, None if self.key_encryption_key is not None: cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) response = await self._create_blob( container_name=container_name, blob_name=blob_name, content_length=count, content_settings=content_settings, metadata=metadata, lease_id=lease_id, premium_page_blob_tier=premium_page_blob_tier, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, if_match=if_match, if_none_match=if_none_match, timeout=timeout, encryption_data=encryption_data ) if count == 0: return response # _upload_blob_chunks returns the block ids for block blobs so resource_properties # is passed as a parameter to get the last_modified and etag for page and append blobs. 
# this info is not needed for block_blobs since _put_block_list is called after which gets this info resource_properties = ResourceProperties() await _upload_blob_chunks( blob_service=self, container_name=container_name, blob_name=blob_name, blob_size=count, block_size=self.MAX_PAGE_SIZE, stream=stream, max_connections=max_connections, progress_callback=progress_callback, validate_content=validate_content, lease_id=lease_id, uploader_class=_PageBlobChunkUploader, if_match=response.etag, timeout=timeout, content_encryption_key=cek, initialization_vector=iv, resource_properties=resource_properties ) return resource_properties async def create_blob_from_bytes( self, container_name, blob_name, blob, index=0, count=None, content_settings=None, metadata=None, validate_content=False, progress_callback=None, max_connections=2, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): ''' Creates a new blob from an array of bytes, or updates the content of an existing blob, with automatic chunking and progress notifications. :param str container_name: Name of existing container. :param str blob_name: Name of blob to create or update. :param bytes blob: Content of blob as an array of bytes. :param int index: Start index in the byte array. :param int count: Number of bytes to upload. Set to None or negative value to upload all bytes starting from index. :param ~azure.storage.blob.models.ContentSettings content_settings: ContentSettings object used to set blob properties. :param metadata: Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) :param bool validate_content: If true, calculates an MD5 hash for each page of the blob. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. 
Note that this MD5 hash is not stored with the blob. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the blob, or None if the total size is unknown. :type progress_callback: func(current, total) :param int max_connections: Maximum number of parallel connections to use. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :param premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. 
This is only applicable to page blobs on premium storage accounts. :return: ETag and last modified properties for the Page Blob :rtype: :class:`~azure.storage.blob.models.ResourceProperties` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('blob', blob) _validate_type_bytes('blob', blob) if index < 0: raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) if count is None or count < 0: count = len(blob) - index stream = BytesIO(blob) stream.seek(index) return await self.create_blob_from_stream( container_name=container_name, blob_name=blob_name, stream=stream, count=count, content_settings=content_settings, metadata=metadata, validate_content=validate_content, lease_id=lease_id, progress_callback=progress_callback, max_connections=max_connections, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, if_match=if_match, if_none_match=if_none_match, timeout=timeout, premium_page_blob_tier=premium_page_blob_tier) async def set_premium_page_blob_tier( self, container_name, blob_name, premium_page_blob_tier, timeout=None): ''' Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. :param str container_name: Name of existing container. :param str blob_name: Name of blob to update. :param PremiumPageBlobTier premium_page_blob_tier: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. 
''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('premium_page_blob_tier', premium_page_blob_tier) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'tier', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-access-tier': _to_str(premium_page_blob_tier) } await self._perform_request(request) async def copy_blob(self, container_name, blob_name, copy_source, metadata=None, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None, destination_if_modified_since=None, destination_if_unmodified_since=None, destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, source_lease_id=None, timeout=None, premium_page_blob_tier=None): ''' Copies a blob asynchronously. This operation returns a copy operation properties object, including a copy ID you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. The source blob for a copy operation must be a page blob. If the destination blob already exists, it must be of the same blob type as the source blob. Any existing destination blob will be overwritten. The destination blob cannot be modified while a copy operation is in progress. When copying from a page blob, the Blob service creates a destination page blob of the source blob's length, initially containing all zeroes. Then the source page ranges are enumerated, and non-empty ranges are copied. If the tier on the source blob is larger than the tier being passed to this copy operation or if the size of the blob exceeds the tier being passed to this copy operation then the operation will fail. You can call get_blob_properties on the destination blob to check the status of the copy operation. The final blob will be committed when the copy completes. 
:param str container_name: Name of the destination container. The container must exist. :param str blob_name: Name of the destination blob. If the destination blob exists, it will be overwritten. Otherwise, it will be created. :param str copy_source: A URL of up to 2 KB in length that specifies an Azure file or blob. The value should be URL-encoded as it would appear in a request URI. If the source is in another account, the source must either be public or must be authenticated via a shared access signature. If the source is public, no authentication is required. Examples: https://myaccount.blob.core.windows.net/mycontainer/myblob https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime> https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken :param metadata: Name-value pairs associated with the blob as metadata. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. :type metadata: dict(str, str). :param datetime source_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the source blob has been modified since the specified date/time. :param datetime source_if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the source blob has not been modified since the specified date/time. 
:param ETag source_if_match: An ETag value, or the wildcard character (*). Specify this conditional header to copy the source blob only if its ETag matches the value specified. If the ETag values do not match, the Blob service returns status code 412 (Precondition Failed). This header cannot be specified if the source is an Azure File. :param ETag source_if_none_match: An ETag value, or the wildcard character (*). Specify this conditional header to copy the blob only if its ETag does not match the value specified. If the values are identical, the Blob service returns status code 412 (Precondition Failed). This header cannot be specified if the source is an Azure File. :param datetime destination_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the destination blob has been modified since the specified date/time. If the destination blob has not been modified, the Blob service returns status code 412 (Precondition Failed). :param datetime destination_if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this conditional header to copy the blob only if the destination blob has not been modified since the specified date/time. If the destination blob has been modified, the Blob service returns status code 412 (Precondition Failed). :param ETag destination_if_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to copy the blob only if the specified ETag value matches the ETag value for an existing destination blob. 
If the ETag for the destination blob does not match the ETag specified for If-Match, the Blob service returns status code 412 (Precondition Failed). :param ETag destination_if_none_match: An ETag value, or the wildcard character (*). Specify an ETag value for this conditional header to copy the blob only if the specified ETag value does not match the ETag value for the destination blob. Specify the wildcard character (*) to perform the operation only if the destination blob does not exist. If the specified condition isn't met, the Blob service returns status code 412 (Precondition Failed). :param str destination_lease_id: The lease ID specified for this header must match the lease ID of the destination blob. If the request does not include the lease ID or it is not valid, the operation fails with status code 412 (Precondition Failed). :param str source_lease_id: Specify this to perform the Copy Blob operation only if the lease ID given matches the active lease ID of the source blob. :param int timeout: The timeout parameter is expressed in seconds. :param PageBlobTier premium_page_blob_tier: A page blob tier value to set on the destination blob. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts. If the tier on the source blob is larger than the tier being passed to this copy operation or if the size of the blob exceeds the tier being passed to this copy operation then the operation will fail. :return: Copy operation properties such as status, source, and ID. 
:rtype: :class:`~azure.storage.blob.models.CopyProperties` ''' return await self._copy_blob(container_name, blob_name, copy_source, metadata, premium_page_blob_tier, source_if_modified_since, source_if_unmodified_since, source_if_match, source_if_none_match, destination_if_modified_since, destination_if_unmodified_since, destination_if_match, destination_if_none_match, destination_lease_id, source_lease_id, timeout, False) # -----Helper methods----------------------------------------------------- async def _create_blob( self, container_name, blob_name, content_length, content_settings=None, sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, encryption_data=None): ''' See create_blob for more details. This helper method allows for encryption or other such special behavior because it is safely handled by the library. These behaviors are prohibited in the public version of this function. :param str encryption_data: The JSON formatted encryption metadata to upload as a part of the blob. This should only be passed internally from other methods and only applied when uploading entire blob contents immediately follows creation of the blob. 
''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('content_length', content_length) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = {'timeout': _int_to_str(timeout)} request.headers = { 'x-ms-blob-type': _to_str(self.blob_type), 'x-ms-blob-content-length': _to_str(content_length), 'x-ms-lease-id': _to_str(lease_id), 'x-ms-blob-sequence-number': _to_str(sequence_number), 'x-ms-access-tier': _to_str(premium_page_blob_tier), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match) } _add_metadata_headers(metadata, request) if content_settings is not None: request.headers.update(content_settings._to_headers()) if encryption_data is not None: request.headers['x-ms-meta-encryptiondata'] = encryption_data return await self._perform_request(request, _parse_base_properties) async def _update_page( self, container_name, blob_name, page, start_range, end_range, validate_content=False, lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None, if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): ''' See update_page for more details. This helper method allows for encryption or other such special behavior because it is safely handled by the library. These behaviors are prohibited in the public version of this function. 
''' request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(container_name, blob_name) request.query = { 'comp': 'page', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-page-write': 'update', 'x-ms-lease-id': _to_str(lease_id), 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), 'If-Match': _to_str(if_match), 'If-None-Match': _to_str(if_none_match) } _validate_and_format_range_headers( request, start_range, end_range, align_to_page=True) request.body = _get_data_bytes_only('page', page) if validate_content: computed_md5 = _get_content_md5(request.body) request.headers['Content-MD5'] = _to_str(computed_md5) return await self._perform_request(request, _parse_page_properties)
53.244795
118
0.65478
9,930
74,170
4.733031
0.055891
0.011702
0.011873
0.018383
0.833741
0.818443
0.810549
0.797825
0.785251
0.780485
0
0.004713
0.293447
74,170
1,392
119
53.283046
0.89211
0.01599
0
0.556017
0
0
0.061935
0.015011
0
0
0
0
0
0
null
null
0
0.03112
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
07c0b3a5dcad5bf45c1b4f7f05c6ebb3d468b438
191
py
Python
platform/hwconf_data/efr32mg12p/modules/PIN/PIN_Snippets.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
null
null
null
platform/hwconf_data/efr32mg12p/modules/PIN/PIN_Snippets.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T02:36:22.000Z
2020-08-25T02:36:22.000Z
platform/hwconf_data/efr32mg12p/modules/PIN/PIN_Snippets.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T01:56:04.000Z
2020-08-25T01:56:04.000Z
""" Generated from a template """ import efr32mg12p.PythonSnippet.RuntimeModel as RuntimeModel from efr32mg12p.modules.PIN.PIN_Defs import PORT_PINS def activate_runtime(): pass
11.235294
60
0.769634
23
191
6.26087
0.782609
0
0
0
0
0
0
0
0
0
0
0.049689
0.157068
191
16
61
11.9375
0.844721
0.13089
0
0
1
0
0
0
0
0
0
0
0
1
0.25
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
1
1
1
0
1
0
0
7
07c430d528140be09b3a3537f23179d26e201762
22,475
py
Python
RecoJets/JetProducers/python/PileupJetIDParams_cfi.py
rmanzoni/cmssw
53286dbc96455754b882e6668ed6a33a6186def2
[ "Apache-2.0" ]
null
null
null
RecoJets/JetProducers/python/PileupJetIDParams_cfi.py
rmanzoni/cmssw
53286dbc96455754b882e6668ed6a33a6186def2
[ "Apache-2.0" ]
null
null
null
RecoJets/JetProducers/python/PileupJetIDParams_cfi.py
rmanzoni/cmssw
53286dbc96455754b882e6668ed6a33a6186def2
[ "Apache-2.0" ]
1
2015-05-08T02:08:04.000Z
2015-05-08T02:08:04.000Z
import FWCore.ParameterSet.Config as cms from RecoJets.JetProducers.PileupJetIDCutParams_cfi import * #################################################################################################################### full_81x_chs = cms.PSet( impactParTkThreshold = cms.double(1.), cutBased = cms.bool(False), etaBinnedWeights = cms.bool(True), tmvaMethod = cms.string("JetIDMVAHighPt"), version = cms.int32(-1), nEtaBins = cms.int32(4), trainings = cms.VPSet( cms.PSet( jEtaMin = cms.double(0.), jEtaMax = cms.double(2.5), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80XvarFix_Eta0to2p5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.5), jEtaMax = cms.double(2.75), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80XvarFix_Eta2p5to2p75_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.75), jEtaMax = cms.double(3.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80XvarFix_Eta2p75to3_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(3.), jEtaMax = cms.double(5.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80XvarFix_Eta3to5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "pull" , "jetR" , ) ), ), tmvaSpectators = cms.vstring( "jetPt" , "jetEta" , ), JetIdParams = full_81x_chs_wp, label = 
cms.string("full") ) #################################################################################################################### trainingVariables_102X_Eta0To3 = [ "nvtx" , "beta" , "dR2Mean" , "frac01" , "frac02" , "frac03" , "frac04" , "majW" , "minW" , "jetR" , "jetRchg" , "nParticles", "nCharged" , "ptD" , "pull" , ] trainingVariables_102X_Eta3To5 = list(trainingVariables_102X_Eta0To3) trainingVariables_102X_Eta3To5.remove('beta') trainingVariables_102X_Eta3To5.remove('jetRchg') trainingVariables_102X_Eta3To5.remove('nCharged') full_102x_chs = full_81x_chs.clone(JetIdParams = full_102x_chs_wp) full_102x_chs.trainings[0].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_102X_Eta0p0To2p5_chs_BDT.weights.xml.gz" full_102x_chs.trainings[0].tmvaVariables = trainingVariables_102X_Eta0To3 full_102x_chs.trainings[1].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_102X_Eta2p5To2p75_chs_BDT.weights.xml.gz" full_102x_chs.trainings[1].tmvaVariables = trainingVariables_102X_Eta0To3 full_102x_chs.trainings[2].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_102X_Eta2p75To3p0_chs_BDT.weights.xml.gz" full_102x_chs.trainings[2].tmvaVariables = trainingVariables_102X_Eta0To3 full_102x_chs.trainings[3].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_102X_Eta3p0To5p0_chs_BDT.weights.xml.gz" full_102x_chs.trainings[3].tmvaVariables = trainingVariables_102X_Eta3To5 #################################################################################################################### trainingVariables_94X_Eta0To3 = list(trainingVariables_102X_Eta0To3) trainingVariables_94X_Eta3To5 = list(trainingVariables_102X_Eta3To5) full_94x_chs = full_81x_chs.clone(JetIdParams = full_94x_chs_wp) full_94x_chs.trainings[0].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_94X_Eta0p0To2p5_chs_BDT.weights.xml.gz" full_94x_chs.trainings[0].tmvaVariables = trainingVariables_94X_Eta0To3 full_94x_chs.trainings[1].tmvaWeights = 
"RecoJets/JetProducers/data/pileupJetId_94X_Eta2p5To2p75_chs_BDT.weights.xml.gz" full_94x_chs.trainings[1].tmvaVariables = trainingVariables_94X_Eta0To3 full_94x_chs.trainings[2].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_94X_Eta2p75To3p0_chs_BDT.weights.xml.gz" full_94x_chs.trainings[2].tmvaVariables = trainingVariables_94X_Eta0To3 full_94x_chs.trainings[3].tmvaWeights = "RecoJets/JetProducers/data/pileupJetId_94X_Eta3p0To5p0_chs_BDT.weights.xml.gz" full_94x_chs.trainings[3].tmvaVariables = trainingVariables_94X_Eta3To5 #################################################################################################################### full_80x_chs = cms.PSet( impactParTkThreshold = cms.double(1.), cutBased = cms.bool(False), etaBinnedWeights = cms.bool(True), tmvaMethod = cms.string("JetIDMVAHighPt"), version = cms.int32(-1), nEtaBins = cms.int32(4), trainings = cms.VPSet( cms.PSet( jEtaMin = cms.double(0.), jEtaMax = cms.double(2.5), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80X_Eta0to2p5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.5), jEtaMax = cms.double(2.75), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80X_Eta2p5to2p75_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.75), jEtaMax = cms.double(3.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80X_Eta2p75to3_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = 
cms.double(3.), jEtaMax = cms.double(5.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_80X_Eta3to5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "pull" , "jetR" , ) ), ), tmvaSpectators = cms.vstring( "jetPt" , "jetEta" , ), JetIdParams = full_80x_chs_wp, label = cms.string("full") ) #################################################################################################################### full_76x_chs = cms.PSet( impactParTkThreshold = cms.double(1.) , cutBased = cms.bool(False), etaBinnedWeights = cms.bool(True), nEtaBins = cms.int32(4), trainings = cms.VPSet( cms.PSet( jEtaMin = cms.double(0.), jEtaMax = cms.double(2.5), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_76x_Eta0to2p5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.5), jEtaMax = cms.double(2.75), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_76x_Eta2p5to2p75_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.75), jEtaMax = cms.double(3.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_76x_Eta2p75to3_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", "dR2Mean" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(3.), jEtaMax = cms.double(5.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/pileupJetId_76x_Eta3to5_BDT.weights.xml.gz"), tmvaVariables = cms.vstring( "nvtx", 
"dR2Mean" , "nParticles" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "pull" , "jetR" , ) ), ), tmvaMethod = cms.string("JetIDMVAHighPt"), version = cms.int32(-1), tmvaSpectators = cms.vstring( "jetPt" , "jetEta" , ), JetIdParams = full_76x_chs_wp, label = cms.string("full") ) #################################################################################################################### full_74x_chs = cms.PSet( impactParTkThreshold = cms.double(1.) , cutBased = cms.bool(False), etaBinnedWeights = cms.bool(True), nEtaBins = cms.int32(4), trainings = cms.VPSet( cms.PSet( jEtaMin = cms.double(0.), jEtaMax = cms.double(2.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_BDTG.weights_jteta_0_2_newNames.xml.gz"), tmvaVariables = cms.vstring( "dR2Mean" , "rho" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "betaStar" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.), jEtaMax = cms.double(2.5), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_BDTG.weights_jteta_2_2p5_newNames.xml.gz"), tmvaVariables = cms.vstring( "dR2Mean" , "rho" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "betaStar" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(2.5), jEtaMax = cms.double(3.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_BDTG.weights_jteta_2p5_3_newNames.xml.gz"), tmvaVariables = cms.vstring( "dR2Mean" , "rho" , "nParticles" , "nCharged" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "beta" , "betaStar" , "pull" , "jetR" , "jetRchg" , ) ), cms.PSet( jEtaMin = cms.double(3.), jEtaMax = cms.double(5.), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_BDTG.weights_jteta_3_5_newNames.xml.gz"), tmvaVariables = cms.vstring( 
"dR2Mean" , "rho" , "nParticles" , "majW" , "minW", "frac01" , "frac02" , "frac03" , "frac04" , "ptD" , "pull" , "jetR" , ) ), ), version = cms.int32(-1), tmvaSpectators = cms.vstring( "jetPt" , "jetEta" , "nTrueInt" , "dRMatch" , ), JetIdParams = full_74x_chs_wp, label = cms.string("full") ) #################################################################################################################### full_53x = cms.PSet( impactParTkThreshold = cms.double(1.) , cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("CondFormats/JetMETObjects/data/TMVAClassificationCategory_JetID_53X_Dec2012.weights.xml"), tmvaMethod = cms.string("JetIDMVAHighPt"), version = cms.int32(-1), tmvaVariables = cms.vstring( "nvtx" , "dZ" , "beta" , "betaStar" , "nCharged" , "nNeutrals", "dR2Mean" , "ptD" , "frac01" , "frac02" , "frac03" , "frac04" , "frac05" , ), tmvaSpectators = cms.vstring( "jetPt", "jetEta", "jetPhi" ), JetIdParams = full_53x_wp, label = cms.string("full53x") ) #################################################################################################################### full_53x_chs = cms.PSet( impactParTkThreshold = cms.double(1.) 
, cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("CondFormats/JetMETObjects/data/TMVAClassificationCategory_JetID_53X_chs_Dec2012.weights.xml"), #tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_JetID_53X_chs_Dec2012.weights.xml"), tmvaMethod = cms.string("JetIDMVAHighPt"), version = cms.int32(-1), tmvaVariables = cms.vstring( "nvtx" , "dZ" , "beta" , "betaStar" , "nCharged" , "nNeutrals", "dR2Mean" , "ptD" , "frac01" , "frac02" , "frac03" , "frac04" , "frac05" , ), tmvaSpectators = cms.vstring( "jetPt", "jetEta", "jetPhi" ), JetIdParams = full_53x_chs_wp, label = cms.string("full") ) #################################################################################################################### met_53x = cms.PSet( impactParTkThreshold = cms.double(1.) , cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_JetID_MET_53X_Dec2012.weights.xml.gz"), tmvaMethod = cms.string("JetIDMVAMET"), version = cms.int32(-1), tmvaVariables = cms.vstring( "nvtx" , "jetPt" , "jetEta" , "jetPhi" , "dZ" , "beta" , "betaStar" , "nCharged" , "nNeutrals", "dR2Mean" , "ptD" , "frac01" , "frac02" , "frac03" , "frac04" , "frac05" , ), tmvaSpectators = cms.vstring(), JetIdParams = met_53x_wp, label = cms.string("met53x") ) ################################################################################################################## full_5x = cms.PSet( impactParTkThreshold = cms.double(1.) 
, cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassificationCategory_JetID_MET_53X_Dec2012.weights.xml.gz"), tmvaMethod = cms.string("BDT_fullPlusRMS"), version = cms.int32(-1), tmvaVariables = cms.vstring( "frac01", "frac02", "frac03", "frac04", "frac05", "dR2Mean", "nvtx", "nNeutrals", "beta", "betaStar", "dZ", "nCharged", ), tmvaSpectators = cms.vstring( "jetPt", "jetEta", ), JetIdParams = full_5x_wp, label = cms.string("full") ) ################################################################################################################## full_5x_chs = cms.PSet( impactParTkThreshold = cms.double(1.) , cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/TMVAClassification_5x_BDT_chsFullPlusRMS.weights.xml.gz"), tmvaMethod = cms.string("BDT_chsFullPlusRMS"), version = cms.int32(-1), tmvaVariables = cms.vstring( "frac01", "frac02", "frac03", "frac04", "frac05", "dR2Mean", "nvtx", "nNeutrals", "beta", "betaStar", "dZ", "nCharged", ), tmvaSpectators = cms.vstring( "jetPt", "jetEta", ), JetIdParams = full_5x_chs_wp, label = cms.string("full") ) #################################################################################################################### cutbased = cms.PSet( impactParTkThreshold = cms.double(1.), cutBased = cms.bool(True), JetIdParams = PuJetIdCutBased_wp, label = cms.string("cutbased") ) #################################################################################################################### PhilV1 = cms.PSet( impactParTkThreshold = cms.double(1.) 
, cutBased = cms.bool(False), etaBinnedWeights = cms.bool(False), tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/mva_JetID_v1.weights.xml.gz"), tmvaMethod = cms.string("JetID"), version = cms.int32(-1), tmvaVariables = cms.vstring( "nvtx", "jetPt", "jetEta", "jetPhi", "dZ", "d0", "beta", "betaStar", "nCharged", "nNeutrals", "dRMean", "frac01", "frac02", "frac03", "frac04", "frac05", ), tmvaSpectators = cms.vstring(), JetIdParams = JetIdParams, label = cms.string("philv1") )
34.208524
141
0.427408
1,581
22,475
5.925996
0.075901
0.041306
0.074288
0.058918
0.916213
0.893158
0.876935
0.845875
0.746825
0.682143
0
0.050367
0.399288
22,475
656
142
34.260671
0.643582
0.005339
0
0.822504
0
0
0.216365
0.11377
0
0
0
0
0
1
0
false
0
0.00317
0
0.00317
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
07d63f8a948065297bfdea497d199d76805779a1
165,434
py
Python
sympy/integrals/rubi/rubi_tests/tests/test_sine.py
Michal-Gagala/sympy
3cc756c2af73b5506102abaeefd1b654e286e2c8
[ "MIT" ]
null
null
null
sympy/integrals/rubi/rubi_tests/tests/test_sine.py
Michal-Gagala/sympy
3cc756c2af73b5506102abaeefd1b654e286e2c8
[ "MIT" ]
null
null
null
sympy/integrals/rubi/rubi_tests/tests/test_sine.py
Michal-Gagala/sympy
3cc756c2af73b5506102abaeefd1b654e286e2c8
[ "MIT" ]
null
null
null
import sys from sympy.external import import_module matchpy = import_module("matchpy") if not matchpy: #bin/test will not execute any tests now disabled = True if sys.version_info[:2] < (3, 6): disabled = True from sympy.integrals.rubi.utility_function import ( sympy_op_factory, Int, Sum, Set, With, Module, Scan, MapAnd, FalseQ, ZeroQ, NegativeQ, NonzeroQ, FreeQ, NFreeQ, List, Log, PositiveQ, PositiveIntegerQ, NegativeIntegerQ, IntegerQ, IntegersQ, ComplexNumberQ, PureComplexNumberQ, RealNumericQ, PositiveOrZeroQ, NegativeOrZeroQ, FractionOrNegativeQ, NegQ, Equal, Unequal, IntPart, FracPart, RationalQ, ProductQ, SumQ, NonsumQ, Subst, First, Rest, SqrtNumberQ, SqrtNumberSumQ, LinearQ, Sqrt, ArcCosh, Coefficient, Denominator, Hypergeometric2F1, Not, Simplify, FractionalPart, IntegerPart, AppellF1, EllipticPi, EllipticE, EllipticF, ArcTan, ArcCot, ArcCoth, ArcTanh, ArcSin, ArcSinh, ArcCos, ArcCsc, ArcSec, ArcCsch, ArcSech, Sinh, Tanh, Cosh, Sech, Csch, Coth, LessEqual, Less, Greater, GreaterEqual, FractionQ, IntLinearcQ, Expand, IndependentQ, PowerQ, IntegerPowerQ, PositiveIntegerPowerQ, FractionalPowerQ, AtomQ, ExpQ, LogQ, Head, MemberQ, TrigQ, SinQ, CosQ, TanQ, CotQ, SecQ, CscQ, Sin, Cos, Tan, Cot, Sec, Csc, HyperbolicQ, SinhQ, CoshQ, TanhQ, CothQ, SechQ, CschQ, InverseTrigQ, SinCosQ, SinhCoshQ, LeafCount, Numerator, NumberQ, NumericQ, Length, ListQ, Im, Re, InverseHyperbolicQ, InverseFunctionQ, TrigHyperbolicFreeQ, InverseFunctionFreeQ, RealQ, EqQ, FractionalPowerFreeQ, ComplexFreeQ, PolynomialQ, FactorSquareFree, PowerOfLinearQ, Exponent, QuadraticQ, LinearPairQ, BinomialParts, TrinomialParts, PolyQ, EvenQ, OddQ, PerfectSquareQ, NiceSqrtAuxQ, NiceSqrtQ, Together, PosAux, PosQ, CoefficientList, ReplaceAll, ExpandLinearProduct, GCD, ContentFactor, NumericFactor, NonnumericFactors, MakeAssocList, GensymSubst, KernelSubst, ExpandExpression, Apart, SmartApart, MatchQ, PolynomialQuotientRemainder, FreeFactors, NonfreeFactors, RemoveContentAux, RemoveContent, FreeTerms, 
NonfreeTerms, ExpandAlgebraicFunction, CollectReciprocals, ExpandCleanup, AlgebraicFunctionQ, Coeff, LeadTerm, RemainingTerms, LeadFactor, RemainingFactors, LeadBase, LeadDegree, Numer, Denom, hypergeom, Expon, MergeMonomials, PolynomialDivide, BinomialQ, TrinomialQ, GeneralizedBinomialQ, GeneralizedTrinomialQ, FactorSquareFreeList, PerfectPowerTest, SquareFreeFactorTest, RationalFunctionQ, RationalFunctionFactors, NonrationalFunctionFactors, Reverse, RationalFunctionExponents, RationalFunctionExpand, ExpandIntegrand, SimplerQ, SimplerSqrtQ, SumSimplerQ, BinomialDegree, TrinomialDegree, CancelCommonFactors, SimplerIntegrandQ, GeneralizedBinomialDegree, GeneralizedBinomialParts, GeneralizedTrinomialDegree, GeneralizedTrinomialParts, MonomialQ, MonomialSumQ, MinimumMonomialExponent, MonomialExponent, LinearMatchQ, PowerOfLinearMatchQ, QuadraticMatchQ, CubicMatchQ, BinomialMatchQ, TrinomialMatchQ, GeneralizedBinomialMatchQ, GeneralizedTrinomialMatchQ, QuotientOfLinearsMatchQ, PolynomialTermQ, PolynomialTerms, NonpolynomialTerms, PseudoBinomialParts, NormalizePseudoBinomial, PseudoBinomialPairQ, PseudoBinomialQ, PolynomialGCD, PolyGCD, AlgebraicFunctionFactors, NonalgebraicFunctionFactors, QuotientOfLinearsP, QuotientOfLinearsParts, QuotientOfLinearsQ, Flatten, Sort, AbsurdNumberQ, AbsurdNumberFactors, NonabsurdNumberFactors, SumSimplerAuxQ, Prepend, Drop, CombineExponents, FactorInteger, FactorAbsurdNumber, SubstForInverseFunction, SubstForFractionalPower, SubstForFractionalPowerOfQuotientOfLinears, FractionalPowerOfQuotientOfLinears, SubstForFractionalPowerQ, SubstForFractionalPowerAuxQ, FractionalPowerOfSquareQ, FractionalPowerSubexpressionQ, Apply, FactorNumericGcd, MergeableFactorQ, MergeFactor, MergeFactors, TrigSimplifyQ, TrigSimplify, TrigSimplifyRecur, Order, FactorOrder, Smallest, OrderedQ, MinimumDegree, PositiveFactors, Sign, NonpositiveFactors, PolynomialInAuxQ, PolynomialInQ, ExponentInAux, ExponentIn, PolynomialInSubstAux, PolynomialInSubst, Distrib, 
DistributeDegree, FunctionOfPower, DivideDegreesOfFactors, MonomialFactor, FullSimplify, FunctionOfLinearSubst, FunctionOfLinear, NormalizeIntegrand, NormalizeIntegrandAux, NormalizeIntegrandFactor, NormalizeIntegrandFactorBase, NormalizeTogether, NormalizeLeadTermSigns, AbsorbMinusSign, NormalizeSumFactors, SignOfFactor, NormalizePowerOfLinear, SimplifyIntegrand, SimplifyTerm, TogetherSimplify, SmartSimplify, SubstForExpn, ExpandToSum, UnifySum, UnifyTerms, UnifyTerm, CalculusQ, FunctionOfInverseLinear, PureFunctionOfSinhQ, PureFunctionOfTanhQ, PureFunctionOfCoshQ, IntegerQuotientQ, OddQuotientQ, EvenQuotientQ, FindTrigFactor, FunctionOfSinhQ, FunctionOfCoshQ, OddHyperbolicPowerQ, FunctionOfTanhQ, FunctionOfTanhWeight, FunctionOfHyperbolicQ, SmartNumerator, SmartDenominator, SubstForAux, ActivateTrig, ExpandTrig, TrigExpand, SubstForTrig, SubstForHyperbolic, InertTrigFreeQ, LCM, SubstForFractionalPowerOfLinear, FractionalPowerOfLinear, InverseFunctionOfLinear, InertTrigQ, InertReciprocalQ, DeactivateTrig, FixInertTrigFunction, DeactivateTrigAux, PowerOfInertTrigSumQ, PiecewiseLinearQ, KnownTrigIntegrandQ, KnownSineIntegrandQ, KnownTangentIntegrandQ, KnownCotangentIntegrandQ, KnownSecantIntegrandQ, TryPureTanSubst, TryTanhSubst, TryPureTanhSubst, AbsurdNumberGCD, AbsurdNumberGCDList, ExpandTrigExpand, ExpandTrigReduce, ExpandTrigReduceAux, NormalizeTrig, TrigToExp, ExpandTrigToExp, TrigReduce, FunctionOfTrig, AlgebraicTrigFunctionQ, FunctionOfHyperbolic, FunctionOfQ, FunctionOfExpnQ, PureFunctionOfSinQ, PureFunctionOfCosQ, PureFunctionOfTanQ, PureFunctionOfCotQ, FunctionOfCosQ, FunctionOfSinQ, OddTrigPowerQ, FunctionOfTanQ, FunctionOfTanWeight, FunctionOfTrigQ, FunctionOfDensePolynomialsQ, FunctionOfLog, PowerVariableExpn, PowerVariableDegree, PowerVariableSubst, EulerIntegrandQ, FunctionOfSquareRootOfQuadratic, SquareRootOfQuadraticSubst, Divides, EasyDQ, ProductOfLinearPowersQ, Rt, NthRoot, AtomBaseQ, SumBaseQ, NegSumBaseQ, AllNegTermQ, SomeNegTermQ, TrigSquareQ, 
RtAux, TrigSquare, IntSum, IntTerm, Map2, ConstantFactor, SameQ, ReplacePart, CommonFactors, MostMainFactorPosition, FunctionOfExponentialQ, FunctionOfExponential, FunctionOfExponentialFunction, FunctionOfExponentialFunctionAux, FunctionOfExponentialTest, FunctionOfExponentialTestAux, stdev, rubi_test, If, IntQuadraticQ, IntBinomialQ, RectifyTangent, RectifyCotangent, Inequality, Condition, Simp, SimpHelp, SplitProduct, SplitSum, SubstFor, SubstForAux, FresnelS, FresnelC, Erfc, Erfi, Gamma, FunctionOfTrigOfLinearQ, ElementaryFunctionQ, Complex, UnsameQ, _SimpFixFactor, SimpFixFactor, _FixSimplify, FixSimplify, _SimplifyAntiderivativeSum, SimplifyAntiderivativeSum, _SimplifyAntiderivative, SimplifyAntiderivative, _TrigSimplifyAux, TrigSimplifyAux, Cancel, Part, PolyLog, D, Dist, Sum_doit, PolynomialQuotient, Floor, PolynomialRemainder, Factor, PolyLog, CosIntegral, SinIntegral, LogIntegral, SinhIntegral, CoshIntegral, Rule, Erf, PolyGamma, ExpIntegralEi, ExpIntegralE, LogGamma , UtilityOperator, Factorial, Zeta, ProductLog, DerivativeDivides, HypergeometricPFQ, IntHide, OneQ ) from sympy.core.add import Add from sympy.core.mod import Mod from sympy.core.mul import Mul from sympy.core.numbers import (Float, I, Integer) from sympy.core.power import Pow from sympy.core.singleton import S from sympy.functions.elementary.complexes import Abs from sympy.functions.elementary.miscellaneous import sqrt from sympy.integrals.integrals import Integral from sympy.logic.boolalg import (And, Or) from sympy.simplify.simplify import simplify from sympy.integrals.rubi.symbol import WC from sympy.core.symbol import symbols, Symbol from sympy.functions import (sin, cos, tan, cot, csc, sec, sqrt, erf, exp, log) from sympy.functions.elementary.hyperbolic import (acosh, asinh, atanh, acoth, acsch, asech, cosh, sinh, tanh, coth, sech, csch) from sympy.functions.elementary.trigonometric import (atan, acsc, asin, acot, acos, asec) from sympy.integrals.rubi.rubimain import rubi_integrate from 
sympy.core.numbers import pi as Pi a, b, c, d, e, f, m, n, x, u , k, p, r, s, t, i, j= symbols('a b c d e f m n x u k p r s t i j') A, B, C, D, a, b, c, d, e, f, g, h, y, z, m, n, p, q, u, v, w, F = symbols('A B C D a b c d e f g h y z m n p q u v w F', ) def test_1(): assert rubi_test(rubi_integrate(sin(a + b*x), x), x, -cos(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2), x), x, x/S(2) - sin(a + b*x)*cos(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3), x), x, cos(a + b*x)**S(3)/(S(3)*b) - cos(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4), x), x, S(3)*x/S(8) - sin(a + b*x)**S(3)*cos(a + b*x)/(S(4)*b) - S(3)*sin(a + b*x)*cos(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5), x), x, -cos(a + b*x)**S(5)/(S(5)*b) + S(2)*cos(a + b*x)**S(3)/(S(3)*b) - cos(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(6), x), x, S(5)*x/S(16) - sin(a + b*x)**S(5)*cos(a + b*x)/(S(6)*b) - S(5)*sin(a + b*x)**S(3)*cos(a + b*x)/(S(24)*b) - S(5)*sin(a + b*x)*cos(a + b*x)/(S(16)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(7), x), x, cos(a + b*x)**S(7)/(S(7)*b) - S(3)*cos(a + b*x)**S(5)/(S(5)*b) + cos(a + b*x)**S(3)/b - cos(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(8), x), x, S(35)*x/S(128) - sin(a + b*x)**S(7)*cos(a + b*x)/(S(8)*b) - S(7)*sin(a + b*x)**S(5)*cos(a + b*x)/(S(48)*b) - S(35)*sin(a + b*x)**S(3)*cos(a + b*x)/(S(192)*b) - S(35)*sin(a + b*x)*cos(a + b*x)/(S(128)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(7)/2), x), x, S(10)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(21)*b) - S(2)*sin(a + b*x)**(S(5)/2)*cos(a + b*x)/(S(7)*b) - 
S(10)*sqrt(sin(a + b*x))*cos(a + b*x)/(S(21)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(5)/2), x), x, S(6)*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(5)*b) - S(2)*sin(a + b*x)**(S(3)/2)*cos(a + b*x)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(3)/2), x), x, S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(3)*b) - S(2)*sqrt(sin(a + b*x))*cos(a + b*x)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(sin(a + b*x)), x), x, S(2)*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(sin(a + b*x)), x), x, S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(-3)/2), x), x, -S(2)*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/b - S(2)*cos(a + b*x)/(b*sqrt(sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(-5)/2), x), x, S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(3)*b) - S(2)*cos(a + b*x)/(S(3)*b*sin(a + b*x)**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(-7)/2), x), x, -S(6)*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(5)*b) - S(6)*cos(a + b*x)/(S(5)*b*sqrt(sin(a + b*x))) - S(2)*cos(a + b*x)/(S(5)*b*sin(a + b*x)**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(7)/2), x), x, S(10)*c**S(4)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))*sqrt(sin(a + b*x))/(S(21)*b*sqrt(c*sin(a + b*x))) - S(10)*c**S(3)*sqrt(c*sin(a + b*x))*cos(a + b*x)/(S(21)*b) - S(2)*c*(c*sin(a + b*x))**(S(5)/2)*cos(a + b*x)/(S(7)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2), x), x, S(6)*c**S(2)*sqrt(c*sin(a + b*x))*EllipticE(-Pi/S(4) + a/S(2) + 
b*x/S(2), S(2))/(S(5)*b*sqrt(sin(a + b*x))) - S(2)*c*(c*sin(a + b*x))**(S(3)/2)*cos(a + b*x)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2), x), x, S(2)*c**S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))*sqrt(sin(a + b*x))/(S(3)*b*sqrt(c*sin(a + b*x))) - S(2)*c*sqrt(c*sin(a + b*x))*cos(a + b*x)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x)), x), x, S(2)*sqrt(c*sin(a + b*x))*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(b*sqrt(sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(c*sin(a + b*x)), x), x, S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))*sqrt(sin(a + b*x))/(b*sqrt(c*sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-3)/2), x), x, -S(2)*cos(a + b*x)/(b*c*sqrt(c*sin(a + b*x))) - S(2)*sqrt(c*sin(a + b*x))*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(b*c**S(2)*sqrt(sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-5)/2), x), x, -S(2)*cos(a + b*x)/(S(3)*b*c*(c*sin(a + b*x))**(S(3)/2)) + S(2)*EllipticF(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))*sqrt(sin(a + b*x))/(S(3)*b*c**S(2)*sqrt(c*sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-7)/2), x), x, -S(2)*cos(a + b*x)/(S(5)*b*c*(c*sin(a + b*x))**(S(5)/2)) - S(6)*cos(a + b*x)/(S(5)*b*c**S(3)*sqrt(c*sin(a + b*x))) - S(6)*sqrt(c*sin(a + b*x))*EllipticE(-Pi/S(4) + a/S(2) + b*x/S(2), S(2))/(S(5)*b*c**S(4)*sqrt(sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(4)/3), x), x, S(3)*(c*sin(a + b*x))**(S(7)/3)*Hypergeometric2F1(S(1)/2, S(7)/6, S(13)/6, sin(a + b*x)**S(2))*cos(a + b*x)/(S(7)*b*c*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + 
b*x))**(S(2)/3), x), x, S(3)*(c*sin(a + b*x))**(S(5)/3)*Hypergeometric2F1(S(1)/2, S(5)/6, S(11)/6, sin(a + b*x)**S(2))*cos(a + b*x)/(S(5)*b*c*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(1)/3), x), x, -S(3)*c**(S(1)/3)*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))*sqrt(S(9)/2 - S(3)*sqrt(S(3))*I/S(2))*sqrt((-sqrt(S(3)) + I)/(-sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) + sqrt(S(3))*I)))*sqrt((sqrt(S(3)) + I)/(sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) - sqrt(S(3))*I)))*EllipticE(asin(sqrt(S(2))*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))/sqrt(S(3) + sqrt(S(3))*I)), (-sqrt(S(3)) + S(3)*I)/(sqrt(S(3)) + S(3)*I))*sec(a + b*x)/b + S(3)*sqrt(S(2))*c**(S(1)/3)*(S(1) - sqrt(S(3))*I)*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))*sqrt(S(3) - sqrt(S(3))*I)*sqrt((-sqrt(S(3)) + I)/(-sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) + sqrt(S(3))*I)))*sqrt((sqrt(S(3)) + I)/(sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) - sqrt(S(3))*I)))*EllipticF(asin(sqrt(S(2))*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))/sqrt(S(3) - sqrt(S(3))*I)), (sqrt(S(3)) + S(3)*I)/(-sqrt(S(3)) + S(3)*I))*sec(a + b*x)/(S(4)*b), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((c*sin(a + b*x))**(S(1)/3), x), x, S(3)*(c*sin(a + b*x))**(S(4)/3)*Hypergeometric2F1(S(1)/2, S(2)/3, S(5)/3, sin(a + b*x)**S(2))*cos(a + b*x)/(S(4)*b*c*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-1)/3), x), x, -S(3)*sqrt(S(2))*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))*sqrt(S(3) - sqrt(S(3))*I)*sqrt((-sqrt(S(3)) + I)/(-sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) + sqrt(S(3))*I)))*sqrt((sqrt(S(3)) + I)/(sqrt(S(3)) + S(3)*I) + S(2)*(c*sin(a + b*x))**(S(2)/3)/(c**(S(2)/3)*(S(3) - 
sqrt(S(3))*I)))*EllipticF(asin(sqrt(S(2))*sqrt(S(1) - (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3))/sqrt(S(3) - sqrt(S(3))*I)), (sqrt(S(3)) + S(3)*I)/(-sqrt(S(3)) + S(3)*I))*sec(a + b*x)/(S(2)*b*c**(S(1)/3)), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-1)/3), x), x, S(3)*(c*sin(a + b*x))**(S(2)/3)*Hypergeometric2F1(S(1)/3, S(1)/2, S(4)/3, sin(a + b*x)**S(2))*cos(a + b*x)/(S(2)*b*c*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-2)/3), x), x, S(3)**(S(3)/4)*(c*sin(a + b*x))**(S(1)/3)*sqrt(c**(S(4)/3)*(S(1) + (c*sin(a + b*x))**(S(2)/3)/c**(S(2)/3) + (c*sin(a + b*x))**(S(4)/3)/c**(S(4)/3))/(c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3)*(S(1) + sqrt(S(3))))**S(2))*(c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3))*EllipticF(acos((c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3)*(-sqrt(S(3)) + S(1)))/(c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3)*(S(1) + sqrt(S(3))))), sqrt(S(3))/S(4) + S(1)/2)*sec(a + b*x)/(S(2)*b*c**(S(5)/3)*sqrt(-(c*sin(a + b*x))**(S(2)/3)*(c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3))/(c**(S(2)/3) - (c*sin(a + b*x))**(S(2)/3)*(S(1) + sqrt(S(3))))**S(2))), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-2)/3), x), x, S(3)*(c*sin(a + b*x))**(S(1)/3)*Hypergeometric2F1(S(1)/6, S(1)/2, S(7)/6, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(-4)/3), x), x, -S(3)*Hypergeometric2F1(S(-1)/6, S(1)/2, S(5)/6, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*(c*sin(a + b*x))**(S(1)/3)*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**n, x), x, Hypergeometric2F1(S(1)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)**(n + S(1))*cos(a + b*x)/(b*(n + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate((c*sin(a + b*x))**n, x), x, (c*sin(a + b*x))**(n + S(1))*Hypergeometric2F1(S(1)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*(n + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(2))**(S(5)/2), x), x, -S(8)*a**S(2)*sqrt(a*sin(x)**S(2))*cot(x)/S(15) - S(4)*a*(a*sin(x)**S(2))**(S(3)/2)*cot(x)/S(15) - (a*sin(x)**S(2))**(S(5)/2)*cot(x)/S(5), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(2))**(S(3)/2), x), x, -S(2)*a*sqrt(a*sin(x)**S(2))*cot(x)/S(3) - (a*sin(x)**S(2))**(S(3)/2)*cot(x)/S(3), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(a*sin(x)**S(2)), x), x, -sqrt(a*sin(x)**S(2))*cot(x), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(a*sin(x)**S(2)), x), x, -sin(x)*atanh(cos(x))/sqrt(a*sin(x)**S(2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(2))**(S(-3)/2), x), x, -sin(x)*atanh(cos(x))/(S(2)*a*sqrt(a*sin(x)**S(2))) - cot(x)/(S(2)*a*sqrt(a*sin(x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(2))**(S(-5)/2), x), x, -cot(x)/(S(4)*a*(a*sin(x)**S(2))**(S(3)/2)) - S(3)*sin(x)*atanh(cos(x))/(S(8)*a**S(2)*sqrt(a*sin(x)**S(2))) - S(3)*cot(x)/(S(8)*a**S(2)*sqrt(a*sin(x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(3))**(S(5)/2), x), x, -S(26)*a**S(2)*sqrt(a*sin(x)**S(3))*EllipticF(Pi/S(4) - x/S(2), S(2))/(S(77)*sin(x)**(S(3)/2)) - S(2)*a**S(2)*sqrt(a*sin(x)**S(3))*sin(x)**S(5)*cos(x)/S(15) - S(26)*a**S(2)*sqrt(a*sin(x)**S(3))*sin(x)**S(3)*cos(x)/S(165) - S(78)*a**S(2)*sqrt(a*sin(x)**S(3))*sin(x)*cos(x)/S(385) - S(26)*a**S(2)*sqrt(a*sin(x)**S(3))*cot(x)/S(77), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(3))**(S(3)/2), x), x, 
-S(14)*a*sqrt(a*sin(x)**S(3))*EllipticE(Pi/S(4) - x/S(2), S(2))/(S(15)*sin(x)**(S(3)/2)) - S(2)*a*sqrt(a*sin(x)**S(3))*sin(x)**S(2)*cos(x)/S(9) - S(14)*a*sqrt(a*sin(x)**S(3))*cos(x)/S(45), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(a*sin(x)**S(3)), x), x, -S(2)*sqrt(a*sin(x)**S(3))*EllipticF(Pi/S(4) - x/S(2), S(2))/(S(3)*sin(x)**(S(3)/2)) - S(2)*sqrt(a*sin(x)**S(3))*cot(x)/S(3), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(a*sin(x)**S(3)), x), x, S(2)*EllipticE(Pi/S(4) - x/S(2), S(2))*sin(x)**(S(3)/2)/sqrt(a*sin(x)**S(3)) - S(2)*sin(x)*cos(x)/sqrt(a*sin(x)**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(3))**(S(-3)/2), x), x, -S(10)*EllipticF(Pi/S(4) - x/S(2), S(2))*sin(x)**(S(3)/2)/(S(21)*a*sqrt(a*sin(x)**S(3))) - S(10)*cos(x)/(S(21)*a*sqrt(a*sin(x)**S(3))) - S(2)*cot(x)*csc(x)/(S(7)*a*sqrt(a*sin(x)**S(3))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(3))**(S(-5)/2), x), x, S(154)*EllipticE(Pi/S(4) - x/S(2), S(2))*sin(x)**(S(3)/2)/(S(195)*a**S(2)*sqrt(a*sin(x)**S(3))) - S(154)*sin(x)*cos(x)/(S(195)*a**S(2)*sqrt(a*sin(x)**S(3))) - S(2)*cot(x)*csc(x)**S(4)/(S(13)*a**S(2)*sqrt(a*sin(x)**S(3))) - S(22)*cot(x)*csc(x)**S(2)/(S(117)*a**S(2)*sqrt(a*sin(x)**S(3))) - S(154)*cot(x)/(S(585)*a**S(2)*sqrt(a*sin(x)**S(3))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(4))**(S(5)/2), x), x, S(63)*a**S(2)*x*sqrt(a*sin(x)**S(4))*csc(x)**S(2)/S(256) - a**S(2)*sqrt(a*sin(x)**S(4))*sin(x)**S(7)*cos(x)/S(10) - S(9)*a**S(2)*sqrt(a*sin(x)**S(4))*sin(x)**S(5)*cos(x)/S(80) - S(21)*a**S(2)*sqrt(a*sin(x)**S(4))*sin(x)**S(3)*cos(x)/S(160) - S(21)*a**S(2)*sqrt(a*sin(x)**S(4))*sin(x)*cos(x)/S(128) - S(63)*a**S(2)*sqrt(a*sin(x)**S(4))*cot(x)/S(256), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(4))**(S(3)/2), x), x, 
S(5)*a*x*sqrt(a*sin(x)**S(4))*csc(x)**S(2)/S(16) - a*sqrt(a*sin(x)**S(4))*sin(x)**S(3)*cos(x)/S(6) - S(5)*a*sqrt(a*sin(x)**S(4))*sin(x)*cos(x)/S(24) - S(5)*a*sqrt(a*sin(x)**S(4))*cot(x)/S(16), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(a*sin(x)**S(4)), x), x, x*sqrt(a*sin(x)**S(4))*csc(x)**S(2)/S(2) - sqrt(a*sin(x)**S(4))*cot(x)/S(2), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(a*sin(x)**S(4)), x), x, -sin(x)*cos(x)/sqrt(a*sin(x)**S(4)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(4))**(S(-3)/2), x), x, -sin(x)*cos(x)/(a*sqrt(a*sin(x)**S(4))) - cos(x)**S(2)*cot(x)**S(3)/(S(5)*a*sqrt(a*sin(x)**S(4))) - S(2)*cos(x)**S(2)*cot(x)/(S(3)*a*sqrt(a*sin(x)**S(4))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(x)**S(4))**(S(-5)/2), x), x, -sin(x)*cos(x)/(a**S(2)*sqrt(a*sin(x)**S(4))) - cos(x)**S(2)*cot(x)**S(7)/(S(9)*a**S(2)*sqrt(a*sin(x)**S(4))) - S(4)*cos(x)**S(2)*cot(x)**S(5)/(S(7)*a**S(2)*sqrt(a*sin(x)**S(4))) - S(6)*cos(x)**S(2)*cot(x)**S(3)/(S(5)*a**S(2)*sqrt(a*sin(x)**S(4))) - S(4)*cos(x)**S(2)*cot(x)/(S(3)*a**S(2)*sqrt(a*sin(x)**S(4))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(c + d*x)**p)**n, x), x, (b*sin(c + d*x)**p)**n*Hypergeometric2F1(S(1)/2, n*p/S(2) + S(1)/2, n*p/S(2) + S(3)/2, sin(c + d*x)**S(2))*sin(c + d*x)*cos(c + d*x)/(d*(n*p + S(1))*sqrt(cos(c + d*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**S(2))**n, x), x, (c*sin(a + b*x)**S(2))**n*Hypergeometric2F1(S(1)/2, n + S(1)/2, n + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)*cos(a + b*x)/(b*(S(2)*n + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**S(3))**n, x), x, (c*sin(a + b*x)**S(3))**n*Hypergeometric2F1(S(1)/2, S(3)*n/S(2) + S(1)/2, S(3)*n/S(2) + S(3)/2, sin(a + b*x)**S(2))*sin(a + 
b*x)*cos(a + b*x)/(b*(S(3)*n + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**S(4))**n, x), x, (c*sin(a + b*x)**S(4))**n*Hypergeometric2F1(S(1)/2, S(2)*n + S(1)/2, S(2)*n + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)*cos(a + b*x)/(b*(S(4)*n + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**m)**(S(5)/2), x), x, S(2)*c**S(2)*sqrt(c*sin(a + b*x)**m)*Hypergeometric2F1(S(1)/2, S(5)*m/S(4) + S(1)/2, S(5)*m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)**(S(2)*m + S(1))*cos(a + b*x)/(b*(S(5)*m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**m)**(S(3)/2), x), x, S(2)*c*sqrt(c*sin(a + b*x)**m)*Hypergeometric2F1(S(1)/2, S(3)*m/S(4) + S(1)/2, S(3)*m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)**(m + S(1))*cos(a + b*x)/(b*(S(3)*m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x)**m), x), x, S(2)*sqrt(c*sin(a + b*x)**m)*Hypergeometric2F1(S(1)/2, m/S(4) + S(1)/2, m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)*cos(a + b*x)/(b*(m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(c*sin(a + b*x)**m), x), x, S(2)*Hypergeometric2F1(S(1)/2, -m/S(4) + S(1)/2, -m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)*cos(a + b*x)/(b*sqrt(c*sin(a + b*x)**m)*(-m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**m)**(S(-3)/2), x), x, S(2)*Hypergeometric2F1(S(1)/2, -S(3)*m/S(4) + S(1)/2, -S(3)*m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)**(-m + S(1))*cos(a + b*x)/(b*c*sqrt(c*sin(a + b*x)**m)*(-S(3)*m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**m)**(S(-5)/2), x), 
x, S(2)*Hypergeometric2F1(S(1)/2, -S(5)*m/S(4) + S(1)/2, -S(5)*m/S(4) + S(3)/2, sin(a + b*x)**S(2))*sin(a + b*x)**(-S(2)*m + S(1))*cos(a + b*x)/(b*c**S(2)*sqrt(c*sin(a + b*x)**m)*(-S(5)*m + S(2))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x)**m)**(S(1)/m), x), x, -(c*sin(a + b*x)**m)**(S(1)/m)*cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*(b*sin(c + d*x))**p)**n, x), x, (a*(b*sin(c + d*x))**p)**n*Hypergeometric2F1(S(1)/2, n*p/S(2) + S(1)/2, n*p/S(2) + S(3)/2, sin(c + d*x)**S(2))*sin(c + d*x)*cos(c + d*x)/(d*(n*p + S(1))*sqrt(cos(c + d*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(e + f*x))**m*(b*sin(e + f*x))**n, x), x, (a*sin(e + f*x))**(m + S(1))*(b*sin(e + f*x))**n*Hypergeometric2F1(S(1)/2, m/S(2) + n/S(2) + S(1)/2, m/S(2) + n/S(2) + S(3)/2, sin(e + f*x)**S(2))*cos(e + f*x)/(a*f*(m + n + S(1))*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*cos(a + b*x)**S(3), x), x, -cos(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*cos(a + b*x)**S(2), x), x, -cos(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*cos(a + b*x), x), x, sin(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*sec(a + b*x), x), x, -log(cos(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*sec(a + b*x)**S(2), x), x, sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*sec(a + b*x)**S(3), x), x, sec(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)*sec(a + b*x)**S(4), x), x, sec(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, 
_numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(7), x), x, -sin(a + b*x)**S(9)/(S(9)*b) + S(3)*sin(a + b*x)**S(7)/(S(7)*b) - S(3)*sin(a + b*x)**S(5)/(S(5)*b) + sin(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(5), x), x, sin(a + b*x)**S(7)/(S(7)*b) - S(2)*sin(a + b*x)**S(5)/(S(5)*b) + sin(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(3), x), x, -sin(a + b*x)**S(5)/(S(5)*b) + sin(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x), x), x, sin(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(2), x), x, -x + tan(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(4), x), x, tan(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(6), x), x, tan(a + b*x)**S(5)/(S(5)*b) + tan(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(8), x), x, tan(a + b*x)**S(7)/(S(7)*b) + S(2)*tan(a + b*x)**S(5)/(S(5)*b) + tan(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(10), x), x, tan(a + b*x)**S(9)/(S(9)*b) + S(3)*tan(a + b*x)**S(7)/(S(7)*b) + S(3)*tan(a + b*x)**S(5)/(S(5)*b) + tan(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(6), x), x, S(5)*x/S(128) - sin(a + b*x)*cos(a + b*x)**S(7)/(S(8)*b) + sin(a + b*x)*cos(a + b*x)**S(5)/(S(48)*b) + S(5)*sin(a + b*x)*cos(a + b*x)**S(3)/(S(192)*b) + S(5)*sin(a + b*x)*cos(a + 
b*x)/(S(128)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(4), x), x, x/S(16) - sin(a + b*x)*cos(a + b*x)**S(5)/(S(6)*b) + sin(a + b*x)*cos(a + b*x)**S(3)/(S(24)*b) + sin(a + b*x)*cos(a + b*x)/(S(16)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*cos(a + b*x)**S(2), x), x, x/S(8) - sin(a + b*x)*cos(a + b*x)**S(3)/(S(4)*b) + sin(a + b*x)*cos(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2), x), x, x/S(2) - sin(a + b*x)*cos(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x), x), x, -sin(a + b*x)/b + atanh(sin(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(3), x), x, tan(a + b*x)*sec(a + b*x)/(S(2)*b) - atanh(sin(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(5), x), x, tan(a + b*x)*sec(a + b*x)**S(3)/(S(4)*b) - tan(a + b*x)*sec(a + b*x)/(S(8)*b) - atanh(sin(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)*sec(a + b*x)**S(7), x), x, tan(a + b*x)*sec(a + b*x)**S(5)/(S(6)*b) - tan(a + b*x)*sec(a + b*x)**S(3)/(S(24)*b) - tan(a + b*x)*sec(a + b*x)/(S(16)*b) - atanh(sin(a + b*x))/(S(16)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*cos(a + b*x)**S(5), x), x, cos(a + b*x)**S(8)/(S(8)*b) - cos(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*cos(a + b*x)**S(4), x), x, cos(a + b*x)**S(7)/(S(7)*b) - cos(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*cos(a + b*x)**S(3), x), x, -sin(a + b*x)**S(6)/(S(6)*b) + sin(a + 
b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*cos(a + b*x)**S(2), x), x, cos(a + b*x)**S(5)/(S(5)*b) - cos(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*cos(a + b*x), x), x, sin(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x), x), x, -log(cos(a + b*x))/b + cos(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(2), x), x, cos(a + b*x)/b + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(3), x), x, log(cos(a + b*x))/b + tan(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(4), x), x, sec(a + b*x)**S(3)/(S(3)*b) - sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(5), x), x, tan(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(6), x), x, sec(a + b*x)**S(5)/(S(5)*b) - sec(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(7), x), x, sec(a + b*x)**S(6)/(S(6)*b) - sec(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(8), x), x, sec(a + b*x)**S(7)/(S(7)*b) - sec(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)*sec(a + b*x)**S(9), x), x, sec(a + b*x)**S(8)/(S(8)*b) - sec(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(7), x), x, -sin(a + 
b*x)**S(11)/(S(11)*b) + sin(a + b*x)**S(9)/(S(3)*b) - S(3)*sin(a + b*x)**S(7)/(S(7)*b) + sin(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(5), x), x, sin(a + b*x)**S(9)/(S(9)*b) - S(2)*sin(a + b*x)**S(7)/(S(7)*b) + sin(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(3), x), x, -sin(a + b*x)**S(7)/(S(7)*b) + sin(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x), x), x, sin(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(2), x), x, -S(3)*x/S(2) - sin(a + b*x)**S(2)*tan(a + b*x)/(S(2)*b) + S(3)*tan(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(4), x), x, x + tan(a + b*x)**S(3)/(S(3)*b) - tan(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(6), x), x, tan(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(8), x), x, tan(a + b*x)**S(7)/(S(7)*b) + tan(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(10), x), x, tan(a + b*x)**S(9)/(S(9)*b) + S(2)*tan(a + b*x)**S(7)/(S(7)*b) + tan(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(6), x), x, S(3)*x/S(256) - sin(a + b*x)**S(3)*cos(a + b*x)**S(7)/(S(10)*b) - S(3)*sin(a + b*x)*cos(a + b*x)**S(7)/(S(80)*b) + sin(a + b*x)*cos(a + b*x)**S(5)/(S(160)*b) + sin(a + b*x)*cos(a + b*x)**S(3)/(S(128)*b) + S(3)*sin(a + b*x)*cos(a + b*x)/(S(256)*b), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(4), x), x, S(3)*x/S(128) - sin(a + b*x)**S(3)*cos(a + b*x)**S(5)/(S(8)*b) - sin(a + b*x)*cos(a + b*x)**S(5)/(S(16)*b) + sin(a + b*x)*cos(a + b*x)**S(3)/(S(64)*b) + S(3)*sin(a + b*x)*cos(a + b*x)/(S(128)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*cos(a + b*x)**S(2), x), x, x/S(16) - sin(a + b*x)**S(3)*cos(a + b*x)**S(3)/(S(6)*b) - sin(a + b*x)*cos(a + b*x)**S(3)/(S(8)*b) + sin(a + b*x)*cos(a + b*x)/(S(16)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4), x), x, S(3)*x/S(8) - sin(a + b*x)**S(3)*cos(a + b*x)/(S(4)*b) - S(3)*sin(a + b*x)*cos(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x), x), x, -sin(a + b*x)**S(3)/(S(3)*b) - sin(a + b*x)/b + atanh(sin(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(3), x), x, sin(a + b*x)*tan(a + b*x)**S(2)/(S(2)*b) + S(3)*sin(a + b*x)/(S(2)*b) - S(3)*atanh(sin(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(5), x), x, tan(a + b*x)**S(3)*sec(a + b*x)/(S(4)*b) - S(3)*tan(a + b*x)*sec(a + b*x)/(S(8)*b) + S(3)*atanh(sin(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(7), x), x, tan(a + b*x)**S(3)*sec(a + b*x)**S(3)/(S(6)*b) - tan(a + b*x)*sec(a + b*x)**S(3)/(S(8)*b) + tan(a + b*x)*sec(a + b*x)/(S(16)*b) + atanh(sin(a + b*x))/(S(16)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)*sec(a + b*x)**S(9), x), x, tan(a + b*x)**S(3)*sec(a + b*x)**S(5)/(S(8)*b) - tan(a + b*x)*sec(a + b*x)**S(5)/(S(16)*b) + tan(a + b*x)*sec(a + b*x)**S(3)/(S(64)*b) + S(3)*tan(a + b*x)*sec(a + b*x)/(S(128)*b) + S(3)*atanh(sin(a + b*x))/(S(128)*b), 
expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(7), x), x, -cos(a + b*x)**S(12)/(S(12)*b) + cos(a + b*x)**S(10)/(S(5)*b) - cos(a + b*x)**S(8)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(6), x), x, -cos(a + b*x)**S(11)/(S(11)*b) + S(2)*cos(a + b*x)**S(9)/(S(9)*b) - cos(a + b*x)**S(7)/(S(7)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(5), x), x, sin(a + b*x)**S(10)/(S(10)*b) - sin(a + b*x)**S(8)/(S(4)*b) + sin(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(4), x), x, -cos(a + b*x)**S(9)/(S(9)*b) + S(2)*cos(a + b*x)**S(7)/(S(7)*b) - cos(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(3), x), x, -sin(a + b*x)**S(8)/(S(8)*b) + sin(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**S(2), x), x, -cos(a + b*x)**S(7)/(S(7)*b) + S(2)*cos(a + b*x)**S(5)/(S(5)*b) - cos(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x), x), x, sin(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x), x), x, -log(cos(a + b*x))/b - cos(a + b*x)**S(4)/(S(4)*b) + cos(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(2), x), x, -cos(a + b*x)**S(3)/(S(3)*b) + S(2)*cos(a + b*x)/b + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(3), x), x, S(2)*log(cos(a + b*x))/b - cos(a + b*x)**S(2)/(S(2)*b) + sec(a + b*x)**S(2)/(S(2)*b), expand=True, 
_diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(4), x), x, -cos(a + b*x)/b + sec(a + b*x)**S(3)/(S(3)*b) - S(2)*sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(5), x), x, -log(cos(a + b*x))/b + tan(a + b*x)**S(4)/(S(4)*b) - tan(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(6), x), x, sec(a + b*x)**S(5)/(S(5)*b) - S(2)*sec(a + b*x)**S(3)/(S(3)*b) + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(7), x), x, tan(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(8), x), x, sec(a + b*x)**S(7)/(S(7)*b) - S(2)*sec(a + b*x)**S(5)/(S(5)*b) + sec(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(9), x), x, tan(a + b*x)**S(8)/(S(8)*b) + tan(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(10), x), x, sec(a + b*x)**S(9)/(S(9)*b) - S(2)*sec(a + b*x)**S(7)/(S(7)*b) + sec(a + b*x)**S(5)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(11), x), x, sec(a + b*x)**S(10)/(S(10)*b) - sec(a + b*x)**S(8)/(S(4)*b) + sec(a + b*x)**S(6)/(S(6)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(12), x), x, sec(a + b*x)**S(11)/(S(11)*b) - S(2)*sec(a + b*x)**S(9)/(S(9)*b) + sec(a + b*x)**S(7)/(S(7)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*sec(a + b*x)**S(13), x), x, sec(a + b*x)**S(12)/(S(12)*b) - sec(a + b*x)**S(10)/(S(5)*b) + sec(a + b*x)**S(8)/(S(8)*b), expand=True, 
_diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(6)*sec(a + b*x)**S(3), x), x, sin(a + b*x)**S(3)*tan(a + b*x)**S(2)/(S(2)*b) + S(5)*sin(a + b*x)**S(3)/(S(6)*b) + S(5)*sin(a + b*x)/(S(2)*b) - S(5)*atanh(sin(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(7)*sec(a + b*x)**S(6), x), x, cos(a + b*x)/b + sec(a + b*x)**S(5)/(S(5)*b) - sec(a + b*x)**S(3)/b + S(3)*sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(6)/sin(a + b*x), x), x, cos(a + b*x)**S(5)/(S(5)*b) + cos(a + b*x)**S(3)/(S(3)*b) + cos(a + b*x)/b - atanh(cos(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(5)/sin(a + b*x), x), x, log(sin(a + b*x))/b + sin(a + b*x)**S(4)/(S(4)*b) - sin(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(4)/sin(a + b*x), x), x, cos(a + b*x)**S(3)/(S(3)*b) + cos(a + b*x)/b - atanh(cos(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(3)/sin(a + b*x), x), x, log(sin(a + b*x))/b - sin(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(2)/sin(a + b*x), x), x, cos(a + b*x)/b - atanh(cos(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)/sin(a + b*x), x), x, log(sin(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)/sin(a + b*x), x), x, log(tan(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(2)/sin(a + b*x), x), x, -atanh(cos(a + b*x))/b + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(3)/sin(a + b*x), x), x, log(tan(a + b*x))/b + tan(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(sec(a + b*x)**S(4)/sin(a + b*x), x), x, -atanh(cos(a + b*x))/b + sec(a + b*x)**S(3)/(S(3)*b) + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(5)/sin(a + b*x), x), x, log(tan(a + b*x))/b + tan(a + b*x)**S(4)/(S(4)*b) + tan(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(6)/sin(a + b*x), x), x, -atanh(cos(a + b*x))/b + sec(a + b*x)**S(5)/(S(5)*b) + sec(a + b*x)**S(3)/(S(3)*b) + sec(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(7)/sin(a + b*x), x), x, log(tan(a + b*x))/b + tan(a + b*x)**S(6)/(S(6)*b) + S(3)*tan(a + b*x)**S(4)/(S(4)*b) + S(3)*tan(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(7)/sin(a + b*x)**S(2), x), x, -sin(a + b*x)**S(5)/(S(5)*b) + sin(a + b*x)**S(3)/b - S(3)*sin(a + b*x)/b - csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(6)/sin(a + b*x)**S(2), x), x, -S(15)*x/S(8) + cos(a + b*x)**S(4)*cot(a + b*x)/(S(4)*b) + S(5)*cos(a + b*x)**S(2)*cot(a + b*x)/(S(8)*b) - S(15)*cot(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(5)/sin(a + b*x)**S(2), x), x, sin(a + b*x)**S(3)/(S(3)*b) - S(2)*sin(a + b*x)/b - csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(4)/sin(a + b*x)**S(2), x), x, -S(3)*x/S(2) + cos(a + b*x)**S(2)*cot(a + b*x)/(S(2)*b) - S(3)*cot(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(3)/sin(a + b*x)**S(2), x), x, -sin(a + b*x)/b - csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(2)/sin(a + b*x)**S(2), x), x, -x - cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(cos(a + b*x)/sin(a + b*x)**S(2), x), x, -csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)/sin(a + b*x)**S(2), x), x, atanh(sin(a + b*x))/b - csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(2)/sin(a + b*x)**S(2), x), x, tan(a + b*x)/b - cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(3)/sin(a + b*x)**S(2), x), x, S(3)*atanh(sin(a + b*x))/(S(2)*b) + csc(a + b*x)*sec(a + b*x)**S(2)/(S(2)*b) - S(3)*csc(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(4)/sin(a + b*x)**S(2), x), x, tan(a + b*x)**S(3)/(S(3)*b) + S(2)*tan(a + b*x)/b - cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(5)/sin(a + b*x)**S(2), x), x, S(15)*atanh(sin(a + b*x))/(S(8)*b) + csc(a + b*x)*sec(a + b*x)**S(4)/(S(4)*b) + S(5)*csc(a + b*x)*sec(a + b*x)**S(2)/(S(8)*b) - S(15)*csc(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(7)/sin(a + b*x)**S(3), x), x, -S(3)*log(sin(a + b*x))/b - sin(a + b*x)**S(4)/(S(4)*b) + S(3)*sin(a + b*x)**S(2)/(S(2)*b) - csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(6)/sin(a + b*x)**S(3), x), x, -cos(a + b*x)**S(3)*cot(a + b*x)**S(2)/(S(2)*b) - S(5)*cos(a + b*x)**S(3)/(S(6)*b) - S(5)*cos(a + b*x)/(S(2)*b) + S(5)*atanh(cos(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(5)/sin(a + b*x)**S(3), x), x, -S(2)*log(sin(a + b*x))/b + sin(a + b*x)**S(2)/(S(2)*b) - csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(4)/sin(a + b*x)**S(3), x), x, -cos(a + b*x)*cot(a + b*x)**S(2)/(S(2)*b) - S(3)*cos(a + b*x)/(S(2)*b) + 
S(3)*atanh(cos(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(3)/sin(a + b*x)**S(3), x), x, -log(sin(a + b*x))/b - cot(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(2)/sin(a + b*x)**S(3), x), x, -cot(a + b*x)*csc(a + b*x)/(S(2)*b) + atanh(cos(a + b*x))/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)/sin(a + b*x)**S(3), x), x, -csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)/sin(a + b*x)**S(3), x), x, log(tan(a + b*x))/b - cot(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(2)/sin(a + b*x)**S(3), x), x, -S(3)*atanh(cos(a + b*x))/(S(2)*b) - csc(a + b*x)**S(2)*sec(a + b*x)/(S(2)*b) + S(3)*sec(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(3)/sin(a + b*x)**S(3), x), x, S(2)*log(tan(a + b*x))/b + tan(a + b*x)**S(2)/(S(2)*b) - cot(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(4)/sin(a + b*x)**S(3), x), x, -S(5)*atanh(cos(a + b*x))/(S(2)*b) - csc(a + b*x)**S(2)*sec(a + b*x)**S(3)/(S(2)*b) + S(5)*sec(a + b*x)**S(3)/(S(6)*b) + S(5)*sec(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(5)/sin(a + b*x)**S(3), x), x, S(3)*log(tan(a + b*x))/b + tan(a + b*x)**S(4)/(S(4)*b) + S(3)*tan(a + b*x)**S(2)/(S(2)*b) - cot(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(9)/sin(a + b*x)**S(4), x), x, sin(a + b*x)**S(5)/(S(5)*b) - S(4)*sin(a + b*x)**S(3)/(S(3)*b) + S(6)*sin(a + b*x)/b - csc(a + b*x)**S(3)/(S(3)*b) + S(4)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + 
b*x)**S(8)/sin(a + b*x)**S(4), x), x, S(35)*x/S(8) + cos(a + b*x)**S(4)*cot(a + b*x)**S(3)/(S(4)*b) + S(7)*cos(a + b*x)**S(2)*cot(a + b*x)**S(3)/(S(8)*b) - S(35)*cot(a + b*x)**S(3)/(S(24)*b) + S(35)*cot(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(7)/sin(a + b*x)**S(4), x), x, -sin(a + b*x)**S(3)/(S(3)*b) + S(3)*sin(a + b*x)/b - csc(a + b*x)**S(3)/(S(3)*b) + S(3)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(6)/sin(a + b*x)**S(4), x), x, S(5)*x/S(2) + cos(a + b*x)**S(2)*cot(a + b*x)**S(3)/(S(2)*b) - S(5)*cot(a + b*x)**S(3)/(S(6)*b) + S(5)*cot(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(5)/sin(a + b*x)**S(4), x), x, sin(a + b*x)/b - csc(a + b*x)**S(3)/(S(3)*b) + S(2)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(4)/sin(a + b*x)**S(4), x), x, x - cot(a + b*x)**S(3)/(S(3)*b) + cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(3)/sin(a + b*x)**S(4), x), x, -csc(a + b*x)**S(3)/(S(3)*b) + csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(2)/sin(a + b*x)**S(4), x), x, -cot(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)/sin(a + b*x)**S(4), x), x, -csc(a + b*x)**S(3)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)/sin(a + b*x)**S(4), x), x, atanh(sin(a + b*x))/b - csc(a + b*x)**S(3)/(S(3)*b) - csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(2)/sin(a + b*x)**S(4), x), x, tan(a + b*x)/b - cot(a + b*x)**S(3)/(S(3)*b) - S(2)*cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(3)/sin(a + b*x)**S(4), x), 
x, S(5)*atanh(sin(a + b*x))/(S(2)*b) + csc(a + b*x)**S(3)*sec(a + b*x)**S(2)/(S(2)*b) - S(5)*csc(a + b*x)**S(3)/(S(6)*b) - S(5)*csc(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(4)/sin(a + b*x)**S(4), x), x, tan(a + b*x)**S(3)/(S(3)*b) + S(3)*tan(a + b*x)/b - cot(a + b*x)**S(3)/(S(3)*b) - S(3)*cot(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(5)/sin(a + b*x)**S(4), x), x, S(35)*atanh(sin(a + b*x))/(S(8)*b) + csc(a + b*x)**S(3)*sec(a + b*x)**S(4)/(S(4)*b) + S(7)*csc(a + b*x)**S(3)*sec(a + b*x)**S(2)/(S(8)*b) - S(35)*csc(a + b*x)**S(3)/(S(24)*b) - S(35)*csc(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(9)/sin(a + b*x)**S(5), x), x, S(6)*log(sin(a + b*x))/b + sin(a + b*x)**S(4)/(S(4)*b) - S(2)*sin(a + b*x)**S(2)/b - csc(a + b*x)**S(4)/(S(4)*b) + S(2)*csc(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(8)/sin(a + b*x)**S(5), x), x, -cos(a + b*x)**S(3)*cot(a + b*x)**S(4)/(S(4)*b) + S(7)*cos(a + b*x)**S(3)*cot(a + b*x)**S(2)/(S(8)*b) + S(35)*cos(a + b*x)**S(3)/(S(24)*b) + S(35)*cos(a + b*x)/(S(8)*b) - S(35)*atanh(cos(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(7)/sin(a + b*x)**S(5), x), x, S(3)*log(sin(a + b*x))/b - sin(a + b*x)**S(2)/(S(2)*b) - csc(a + b*x)**S(4)/(S(4)*b) + S(3)*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(6)/sin(a + b*x)**S(5), x), x, -cos(a + b*x)*cot(a + b*x)**S(4)/(S(4)*b) + S(5)*cos(a + b*x)*cot(a + b*x)**S(2)/(S(8)*b) + S(15)*cos(a + b*x)/(S(8)*b) - S(15)*atanh(cos(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(5)/sin(a + b*x)**S(5), x), x, log(sin(a + b*x))/b - cot(a + b*x)**S(4)/(S(4)*b) + cot(a + 
b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(4)/sin(a + b*x)**S(5), x), x, -cot(a + b*x)**S(3)*csc(a + b*x)/(S(4)*b) + S(3)*cot(a + b*x)*csc(a + b*x)/(S(8)*b) - S(3)*atanh(cos(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(3)/sin(a + b*x)**S(5), x), x, -cot(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**S(2)/sin(a + b*x)**S(5), x), x, -cot(a + b*x)*csc(a + b*x)**S(3)/(S(4)*b) + cot(a + b*x)*csc(a + b*x)/(S(8)*b) + atanh(cos(a + b*x))/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)/sin(a + b*x)**S(5), x), x, -csc(a + b*x)**S(4)/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)/sin(a + b*x)**S(5), x), x, log(tan(a + b*x))/b - cot(a + b*x)**S(4)/(S(4)*b) - cot(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(2)/sin(a + b*x)**S(5), x), x, -S(15)*atanh(cos(a + b*x))/(S(8)*b) - csc(a + b*x)**S(4)*sec(a + b*x)/(S(4)*b) - S(5)*csc(a + b*x)**S(2)*sec(a + b*x)/(S(8)*b) + S(15)*sec(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(3)/sin(a + b*x)**S(5), x), x, S(3)*log(tan(a + b*x))/b + tan(a + b*x)**S(2)/(S(2)*b) - cot(a + b*x)**S(4)/(S(4)*b) - S(3)*cot(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(4)/sin(a + b*x)**S(5), x), x, -S(35)*atanh(cos(a + b*x))/(S(8)*b) - csc(a + b*x)**S(4)*sec(a + b*x)**S(3)/(S(4)*b) - S(7)*csc(a + b*x)**S(2)*sec(a + b*x)**S(3)/(S(8)*b) + S(35)*sec(a + b*x)**S(3)/(S(24)*b) + S(35)*sec(a + b*x)/(S(8)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(a + b*x)**S(5)/sin(a + b*x)**S(5), x), x, S(6)*log(tan(a + b*x))/b + tan(a + b*x)**S(4)/(S(4)*b) + 
S(2)*tan(a + b*x)**S(2)/b - cot(a + b*x)**S(4)/(S(4)*b) - S(2)*cot(a + b*x)**S(2)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(x)**S(2)/sin(x)**S(6), x), x, -cot(x)**S(5)/S(5) - cot(x)**S(3)/S(3), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(x)**S(3)/sin(x)**S(7), x), x, -csc(x)**S(6)/S(6) + csc(x)**S(4)/S(4), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*sin(a + b*x), x), x, -S(2)*(d*cos(a + b*x))**(S(5)/2)/(S(5)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*sin(a + b*x), x), x, -S(2)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)/sqrt(d*cos(a + b*x)), x), x, -S(2)*sqrt(d*cos(a + b*x))/(b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)/(d*cos(a + b*x))**(S(3)/2), x), x, S(2)/(b*d*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)/(S(7)*b*d*(d*cos(a + b*x))**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(9)/2)*sin(a + b*x)**S(2), x), x, S(28)*d**S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(195)*b*sqrt(cos(a + b*x))) + S(28)*d**S(3)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(585)*b) + S(4)*d*(d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)/(S(117)*b) - S(2)*(d*cos(a + b*x))**(S(11)/2)*sin(a + b*x)/(S(13)*b*d), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)**S(2), x), x, S(20)*d**S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(231)*b*sqrt(d*cos(a + b*x))) + S(20)*d**S(3)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(231)*b) + S(4)*d*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)/(S(77)*b) - S(2)*(d*cos(a + b*x))**(S(9)/2)*sin(a + b*x)/(S(11)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)**S(2), x), x, S(4)*d**S(2)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(15)*b*sqrt(cos(a + b*x))) + S(4)*d*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(45)*b) - S(2)*(d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)/(S(9)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)**S(2), x), x, S(4)*d**S(2)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(21)*b*sqrt(d*cos(a + b*x))) + S(4)*d*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(21)*b) - S(2)*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)/(S(7)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*sin(a + b*x)**S(2), x), x, S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*sqrt(cos(a + b*x))) - S(2)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(5)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)/sqrt(d*cos(a + b*x)), x), x, S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(3)*b*sqrt(d*cos(a + b*x))) - S(2)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(3)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)/(d*cos(a + b*x))**(S(3)/2), x), x, S(2)*sin(a + b*x)/(b*d*sqrt(d*cos(a + b*x))) - S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(b*d**S(2)*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)*sin(a + 
b*x)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)) - S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(3)*b*d**S(2)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)*sin(a + b*x)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) - S(4)*sin(a + b*x)/(S(5)*b*d**S(3)*sqrt(d*cos(a + b*x))) + S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*d**S(4)*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(2)/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)*sin(a + b*x)/(S(7)*b*d*(d*cos(a + b*x))**(S(7)/2)) - S(4)*sin(a + b*x)/(S(21)*b*d**S(3)*(d*cos(a + b*x))**(S(3)/2)) - S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(21)*b*d**S(4)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*sin(a + b*x)**S(3), x), x, -S(2)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b*d) + S(2)*(d*cos(a + b*x))**(S(7)/2)/(S(7)*b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/sqrt(d*cos(a + b*x)), x), x, -S(2)*sqrt(d*cos(a + b*x))/(b*d) + S(2)*(d*cos(a + b*x))**(S(5)/2)/(S(5)*b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/(d*cos(a + b*x))**(S(3)/2), x), x, S(2)/(b*d*sqrt(d*cos(a + b*x))) + S(2)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)) + S(2)*sqrt(d*cos(a + b*x))/(b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) - S(2)/(b*d**S(3)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/(d*cos(a + 
b*x))**(S(9)/2), x), x, S(2)/(S(7)*b*d*(d*cos(a + b*x))**(S(7)/2)) - S(2)/(S(3)*b*d**S(3)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(3)/(d*cos(a + b*x))**(S(11)/2), x), x, S(2)/(S(9)*b*d*(d*cos(a + b*x))**(S(9)/2)) - S(2)/(S(5)*b*d**S(3)*(d*cos(a + b*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(9)/2)*sin(a + b*x)**S(4), x), x, S(56)*d**S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(1105)*b*sqrt(cos(a + b*x))) + S(56)*d**S(3)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(3315)*b) + S(8)*d*(d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)/(S(663)*b) - S(2)*(d*cos(a + b*x))**(S(11)/2)*sin(a + b*x)**S(3)/(S(17)*b*d) - S(12)*(d*cos(a + b*x))**(S(11)/2)*sin(a + b*x)/(S(221)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)**S(4), x), x, S(8)*d**S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(231)*b*sqrt(d*cos(a + b*x))) + S(8)*d**S(3)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(231)*b) + S(8)*d*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)/(S(385)*b) - S(2)*(d*cos(a + b*x))**(S(9)/2)*sin(a + b*x)**S(3)/(S(15)*b*d) - S(4)*(d*cos(a + b*x))**(S(9)/2)*sin(a + b*x)/(S(55)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)**S(4), x), x, S(8)*d**S(2)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(65)*b*sqrt(cos(a + b*x))) + S(8)*d*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(195)*b) - S(2)*(d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)**S(3)/(S(13)*b*d) - S(4)*(d*cos(a + b*x))**(S(7)/2)*sin(a + b*x)/(S(39)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)**S(4), x), x, S(8)*d**S(2)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(77)*b*sqrt(d*cos(a + b*x))) + S(8)*d*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(77)*b) - 
S(2)*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)**S(3)/(S(11)*b*d) - S(12)*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)/(S(77)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*sin(a + b*x)**S(4), x), x, S(8)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(15)*b*sqrt(cos(a + b*x))) - S(2)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)**S(3)/(S(9)*b*d) - S(4)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(15)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)/sqrt(d*cos(a + b*x)), x), x, S(8)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(7)*b*sqrt(d*cos(a + b*x))) - S(2)*sqrt(d*cos(a + b*x))*sin(a + b*x)**S(3)/(S(7)*b*d) - S(4)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(7)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)/(d*cos(a + b*x))**(S(3)/2), x), x, S(2)*sin(a + b*x)**S(3)/(b*d*sqrt(d*cos(a + b*x))) - S(24)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*d**S(2)*sqrt(cos(a + b*x))) + S(12)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(5)*b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)*sin(a + b*x)**S(3)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)) - S(8)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(3)*b*d**S(2)*sqrt(d*cos(a + b*x))) + S(4)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(3)*b*d**S(3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)*sin(a + b*x)**S(3)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) - S(12)*sin(a + b*x)/(S(5)*b*d**S(3)*sqrt(d*cos(a + b*x))) + S(24)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*d**S(4)*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(4)/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)*sin(a + b*x)**S(3)/(S(7)*b*d*(d*cos(a + 
b*x))**(S(7)/2)) - S(4)*sin(a + b*x)/(S(7)*b*d**S(3)*(d*cos(a + b*x))**(S(3)/2)) + S(8)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(7)*b*d**S(4)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**S(5)*cos(a + b*x)**(S(3)/2), x), x, -S(2)*cos(a + b*x)**(S(13)/2)/(S(13)*b) + S(4)*cos(a + b*x)**(S(9)/2)/(S(9)*b) - S(2)*cos(a + b*x)**(S(5)/2)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(9)/2)*csc(a + b*x), x), x, d**(S(9)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/b - d**(S(9)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/b + S(2)*d**S(3)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b) + S(2)*d*(d*cos(a + b*x))**(S(7)/2)/(S(7)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)*csc(a + b*x), x), x, -d**(S(7)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/b - d**(S(7)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/b + S(2)*d**S(3)*sqrt(d*cos(a + b*x))/b + S(2)*d*(d*cos(a + b*x))**(S(5)/2)/(S(5)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(5)/2)*csc(a + b*x), x), x, d**(S(5)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/b - d**(S(5)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/b + S(2)*d*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*csc(a + b*x), x), x, -d**(S(3)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/b - d**(S(3)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/b + S(2)*d*sqrt(d*cos(a + b*x))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*csc(a + b*x), x), x, sqrt(d)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/b - sqrt(d)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)/sqrt(d*cos(a + b*x)), x), x, -ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(b*sqrt(d)) - atanh(sqrt(d*cos(a + 
b*x))/sqrt(d))/(b*sqrt(d)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)/(d*cos(a + b*x))**(S(3)/2), x), x, S(2)/(b*d*sqrt(d*cos(a + b*x))) + ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(3)/2)) - atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)) - ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(5)/2)) - atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) + S(2)/(b*d**S(3)*sqrt(d*cos(a + b*x))) + ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(7)/2)) - atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)/(S(7)*b*d*(d*cos(a + b*x))**(S(7)/2)) + S(2)/(S(3)*b*d**S(3)*(d*cos(a + b*x))**(S(3)/2)) - ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(9)/2)) - atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(b*d**(S(9)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(11)/2)*csc(a + b*x)**S(2), x), x, -S(15)*d**S(6)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(7)*b*sqrt(d*cos(a + b*x))) - S(15)*d**S(5)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(7)*b) - S(9)*d**S(3)*(d*cos(a + b*x))**(S(5)/2)*sin(a + b*x)/(S(7)*b) - d*(d*cos(a + b*x))**(S(9)/2)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(9)/2)*csc(a + b*x)**S(2), x), x, -S(21)*d**S(4)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*sqrt(cos(a + b*x))) - S(7)*d**S(3)*(d*cos(a + b*x))**(S(3)/2)*sin(a + b*x)/(S(5)*b) - d*(d*cos(a + b*x))**(S(7)/2)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)*csc(a + b*x)**S(2), x), x, -S(5)*d**S(4)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(3)*b*sqrt(d*cos(a + b*x))) - S(5)*d**S(3)*sqrt(d*cos(a + b*x))*sin(a + b*x)/(S(3)*b) - d*(d*cos(a + b*x))**(S(5)/2)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(5)/2)*csc(a + b*x)**S(2), x), x, -S(3)*d**S(2)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(b*sqrt(cos(a + b*x))) - d*(d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)**S(2), x), x, -d**S(2)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(b*sqrt(d*cos(a + b*x))) - d*sqrt(d*cos(a + b*x))*csc(a + b*x)/b, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*csc(a + b*x)**S(2), x), x, -sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(b*sqrt(cos(a + b*x))) - (d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)/(b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(2)/sqrt(d*cos(a + b*x)), x), x, EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(b*sqrt(d*cos(a + b*x))) - sqrt(d*cos(a + b*x))*csc(a + b*x)/(b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(2)/(d*cos(a + b*x))**(S(3)/2), x), x, S(3)*sin(a + b*x)/(b*d*sqrt(d*cos(a + b*x))) - csc(a + b*x)/(b*d*sqrt(d*cos(a + b*x))) - S(3)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(b*d**S(2)*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(2)/(d*cos(a + b*x))**(S(5)/2), x), x, S(5)*sin(a + b*x)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)) - csc(a + b*x)/(b*d*(d*cos(a + b*x))**(S(3)/2)) + S(5)*EllipticF(a/S(2) + b*x/S(2), S(2))*sqrt(cos(a + b*x))/(S(3)*b*d**S(2)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(csc(a + b*x)**S(2)/(d*cos(a + b*x))**(S(7)/2), x), x, S(7)*sin(a + b*x)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) - csc(a + b*x)/(b*d*(d*cos(a + b*x))**(S(5)/2)) + S(21)*sin(a + b*x)/(S(5)*b*d**S(3)*sqrt(d*cos(a + b*x))) - S(21)*sqrt(d*cos(a + b*x))*EllipticE(a/S(2) + b*x/S(2), S(2))/(S(5)*b*d**S(4)*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(11)/2)*csc(a + b*x)**S(3), x), x, S(9)*d**(S(11)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) + S(9)*d**(S(11)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - S(9)*d**S(5)*sqrt(d*cos(a + b*x))/(S(2)*b) - S(9)*d**S(3)*(d*cos(a + b*x))**(S(5)/2)/(S(10)*b) - d*(d*cos(a + b*x))**(S(9)/2)*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(9)/2)*csc(a + b*x)**S(3), x), x, -S(7)*d**(S(9)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) + S(7)*d**(S(9)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - S(7)*d**S(3)*(d*cos(a + b*x))**(S(3)/2)/(S(6)*b) - d*(d*cos(a + b*x))**(S(7)/2)*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)*csc(a + b*x)**S(3), x), x, S(5)*d**(S(7)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) + S(5)*d**(S(7)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - S(5)*d**S(3)*sqrt(d*cos(a + b*x))/(S(2)*b) - d*(d*cos(a + b*x))**(S(5)/2)*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(5)/2)*csc(a + b*x)**S(3), x), x, -S(3)*d**(S(5)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) + S(3)*d**(S(5)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - d*(d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)**S(3), x), x, d**(S(3)/2)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) + 
d**(S(3)/2)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - d*sqrt(d*cos(a + b*x))*csc(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))*csc(a + b*x)**S(3), x), x, sqrt(d)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - sqrt(d)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b) - (d*cos(a + b*x))**(S(3)/2)*csc(a + b*x)**S(2)/(S(2)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(3)/sqrt(d*cos(a + b*x)), x), x, -sqrt(d*cos(a + b*x))*csc(a + b*x)**S(2)/(S(2)*b*d) - S(3)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*sqrt(d)) - S(3)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*sqrt(d)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(3)/(d*cos(a + b*x))**(S(3)/2), x), x, -csc(a + b*x)**S(2)/(S(2)*b*d*sqrt(d*cos(a + b*x))) + S(5)/(S(2)*b*d*sqrt(d*cos(a + b*x))) + S(5)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(3)/2)) - S(5)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(3)/(d*cos(a + b*x))**(S(5)/2), x), x, -csc(a + b*x)**S(2)/(S(2)*b*d*(d*cos(a + b*x))**(S(3)/2)) + S(7)/(S(6)*b*d*(d*cos(a + b*x))**(S(3)/2)) - S(7)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(5)/2)) - S(7)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(a + b*x)**S(3)/(d*cos(a + b*x))**(S(7)/2), x), x, -csc(a + b*x)**S(2)/(S(2)*b*d*(d*cos(a + b*x))**(S(5)/2)) + S(9)/(S(10)*b*d*(d*cos(a + b*x))**(S(5)/2)) + S(9)/(S(2)*b*d**S(3)*sqrt(d*cos(a + b*x))) + S(9)*ArcTan(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(7)/2)) - S(9)*atanh(sqrt(d*cos(a + b*x))/sqrt(d))/(S(4)*b*d**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(1)/5)*sin(a + b*x), x), x, -S(5)*(d*cos(a + b*x))**(S(6)/5)/(S(6)*b*d), expand=True, 
_diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(sin(x))*cos(x)**S(3), x), x, -S(2)*sin(x)**(S(7)/2)/S(7) + S(2)*sin(x)**(S(3)/2)/S(3), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(x)**(S(3)/2)*cos(x)**S(3), x), x, -S(2)*sin(x)**(S(9)/2)/S(9) + S(2)*sin(x)**(S(5)/2)/S(5), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(x)**(S(5)/2)*cos(x)**S(3), x), x, -S(2)*sin(x)**(S(11)/2)/S(11) + S(2)*sin(x)**(S(7)/2)/S(7), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(x)**S(3)/sqrt(sin(x)), x), x, -S(2)*sin(x)**(S(5)/2)/S(5) + S(2)*sqrt(sin(x)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(9)/2), x), x, S(7)*d**S(4)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(20)*b*sqrt(sin(S(2)*a + S(2)*b*x))) + S(7)*d**S(3)*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2)/(S(30)*b*c) + d*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(7)/2)/(S(5)*b*c), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(5)/2), x), x, d**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(2)*b*sqrt(sin(S(2)*a + S(2)*b*x))) + d*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b*c), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x)), x), x, sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(b*sqrt(sin(S(2)*a + S(2)*b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/(d*cos(a + b*x))**(S(3)/2), x), x, -S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(b*d**S(2)*sqrt(sin(S(2)*a + S(2)*b*x))) + S(2)*(c*sin(a + b*x))**(S(3)/2)/(b*c*d*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/(d*cos(a + b*x))**(S(7)/2), x), x, -S(4)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(5)*b*d**S(4)*sqrt(sin(S(2)*a + S(2)*b*x))) + S(2)*(c*sin(a + b*x))**(S(3)/2)/(S(5)*b*c*d*(d*cos(a + b*x))**(S(5)/2)) + S(4)*(c*sin(a + b*x))**(S(3)/2)/(S(5)*b*c*d**S(3)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(3)/2), x), x, -sqrt(S(2))*sqrt(c)*d**(S(3)/2)*ArcTan(S(1) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(8)*b) + sqrt(S(2))*sqrt(c)*d**(S(3)/2)*ArcTan(S(1) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(8)*b) + sqrt(S(2))*sqrt(c)*d**(S(3)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(16)*b) - sqrt(S(2))*sqrt(c)*d**(S(3)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(16)*b) + d*(c*sin(a + b*x))**(S(3)/2)*sqrt(d*cos(a + b*x))/(S(2)*b*c), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)), x), x, -sqrt(S(2))*sqrt(c)*ArcTan(S(1) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(2)*b*sqrt(d)) + sqrt(S(2))*sqrt(c)*ArcTan(S(1) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(2)*b*sqrt(d)) + sqrt(S(2))*sqrt(c)*log(sqrt(c)*tan(a + b*x) + sqrt(c) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(4)*b*sqrt(d)) - sqrt(S(2))*sqrt(c)*log(sqrt(c)*tan(a + b*x) + sqrt(c) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(4)*b*sqrt(d)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/(d*cos(a + b*x))**(S(5)/2), x), x, S(2)*(c*sin(a + b*x))**(S(3)/2)/(S(3)*b*c*d*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)*(c*sin(a + b*x))**(S(3)/2)/(S(7)*b*c*d*(d*cos(a + b*x))**(S(7)/2)) + S(8)*(c*sin(a + b*x))**(S(3)/2)/(S(21)*b*c*d**S(3)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))/(d*cos(a + b*x))**(S(13)/2), x), x, S(2)*(c*sin(a + b*x))**(S(3)/2)/(S(11)*b*c*d*(d*cos(a + b*x))**(S(11)/2)) + S(16)*(c*sin(a + b*x))**(S(3)/2)/(S(77)*b*c*d**S(3)*(d*cos(a + b*x))**(S(7)/2)) + S(64)*(c*sin(a + b*x))**(S(3)/2)/(S(231)*b*c*d**S(5)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2), x), x, c**S(2)*d**S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(12)*b*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + c*d*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))/(S(6)*b) - c*sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(5)/2)/(S(3)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/sqrt(d*cos(a + b*x)), x), x, c**S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(2)*b*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) - c*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))/(b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(5)/2), x), x, -c**S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(3)*b*d**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + S(2)*c*sqrt(c*sin(a + b*x))/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(9)/2), x), x, -S(2)*c**S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(21)*b*d**S(4)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + S(2)*c*sqrt(c*sin(a + b*x))/(S(7)*b*d*(d*cos(a + b*x))**(S(7)/2)) - 
S(2)*c*sqrt(c*sin(a + b*x))/(S(21)*b*d**S(3)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)*sqrt(d*cos(a + b*x)), x), x, sqrt(S(2))*c**(S(3)/2)*sqrt(d)*ArcTan(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(8)*b) - sqrt(S(2))*c**(S(3)/2)*sqrt(d)*ArcTan(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(8)*b) - sqrt(S(2))*c**(S(3)/2)*sqrt(d)*log(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(16)*b) + sqrt(S(2))*c**(S(3)/2)*sqrt(d)*log(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(16)*b) - c*sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(3)/2)/(S(2)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(3)/2), x), x, -sqrt(S(2))*c**(S(3)/2)*ArcTan(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(2)*b*d**(S(3)/2)) + sqrt(S(2))*c**(S(3)/2)*ArcTan(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(2)*b*d**(S(3)/2)) + sqrt(S(2))*c**(S(3)/2)*log(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(4)*b*d**(S(3)/2)) - sqrt(S(2))*c**(S(3)/2)*log(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(4)*b*d**(S(3)/2)) + S(2)*c*sqrt(c*sin(a + b*x))/(b*d*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(7)/2), x), x, S(2)*(c*sin(a + b*x))**(S(5)/2)/(S(5)*b*c*d*(d*cos(a + b*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(11)/2), x), x, S(2)*c*sqrt(c*sin(a + b*x))/(S(9)*b*d*(d*cos(a + b*x))**(S(9)/2)) - S(2)*c*sqrt(c*sin(a + 
b*x))/(S(45)*b*d**S(3)*(d*cos(a + b*x))**(S(5)/2)) - S(8)*c*sqrt(c*sin(a + b*x))/(S(45)*b*d**S(5)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)/(d*cos(a + b*x))**(S(15)/2), x), x, S(2)*c*sqrt(c*sin(a + b*x))/(S(13)*b*d*(d*cos(a + b*x))**(S(13)/2)) - S(2)*c*sqrt(c*sin(a + b*x))/(S(117)*b*d**S(3)*(d*cos(a + b*x))**(S(9)/2)) - S(16)*c*sqrt(c*sin(a + b*x))/(S(585)*b*d**S(5)*(d*cos(a + b*x))**(S(5)/2)) - S(64)*c*sqrt(c*sin(a + b*x))/(S(585)*b*d**S(7)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)*(d*cos(a + b*x))**(S(9)/2), x), x, S(3)*c**S(2)*d**S(4)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(40)*b*sqrt(sin(S(2)*a + S(2)*b*x))) + c*d**S(3)*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2)/(S(20)*b) + S(3)*c*d*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(7)/2)/(S(70)*b) - c*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(11)/2)/(S(7)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)*(d*cos(a + b*x))**(S(5)/2), x), x, S(3)*c**S(2)*d**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(20)*b*sqrt(sin(S(2)*a + S(2)*b*x))) + c*d*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2)/(S(10)*b) - c*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(7)/2)/(S(5)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)*sqrt(d*cos(a + b*x)), x), x, c**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(2)*b*sqrt(sin(S(2)*a + S(2)*b*x))) - c*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(S(3)/2)/(S(3)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(3)/2), x), x, -S(3)*c**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a 
+ b*x, S(2))/(b*d**S(2)*sqrt(sin(S(2)*a + S(2)*b*x))) + S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(b*d*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(7)/2), x), x, S(6)*c**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(5)*b*d**S(4)*sqrt(sin(S(2)*a + S(2)*b*x))) + S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(5)*b*d*(d*cos(a + b*x))**(S(5)/2)) - S(6)*c*(c*sin(a + b*x))**(S(3)/2)/(S(5)*b*d**S(3)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(11)/2), x), x, S(4)*c**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))*EllipticE(-Pi/S(4) + a + b*x, S(2))/(S(15)*b*d**S(6)*sqrt(sin(S(2)*a + S(2)*b*x))) + S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(9)*b*d*(d*cos(a + b*x))**(S(9)/2)) - S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(15)*b*d**S(3)*(d*cos(a + b*x))**(S(5)/2)) - S(4)*c*(c*sin(a + b*x))**(S(3)/2)/(S(15)*b*d**S(5)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/sqrt(d*cos(a + b*x)), x), x, -S(3)*sqrt(S(2))*c**(S(5)/2)*ArcTan(S(1) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(8)*b*sqrt(d)) + S(3)*sqrt(S(2))*c**(S(5)/2)*ArcTan(S(1) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(8)*b*sqrt(d)) + S(3)*sqrt(S(2))*c**(S(5)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(16)*b*sqrt(d)) - S(3)*sqrt(S(2))*c**(S(5)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(16)*b*sqrt(d)) - c*(c*sin(a + b*x))**(S(3)/2)*sqrt(d*cos(a + b*x))/(S(2)*b*d), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(5)/2), x), x, sqrt(S(2))*c**(S(5)/2)*ArcTan(S(1) - 
sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(2)*b*d**(S(5)/2)) - sqrt(S(2))*c**(S(5)/2)*ArcTan(S(1) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/(sqrt(c)*sqrt(d*cos(a + b*x))))/(S(2)*b*d**(S(5)/2)) - sqrt(S(2))*c**(S(5)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) - sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(4)*b*d**(S(5)/2)) + sqrt(S(2))*c**(S(5)/2)*log(sqrt(c)*tan(a + b*x) + sqrt(c) + sqrt(S(2))*sqrt(d)*sqrt(c*sin(a + b*x))/sqrt(d*cos(a + b*x)))/(S(4)*b*d**(S(5)/2)) + S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(3)*b*d*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(9)/2), x), x, S(2)*(c*sin(a + b*x))**(S(7)/2)/(S(7)*b*c*d*(d*cos(a + b*x))**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(13)/2), x), x, S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(11)*b*d*(d*cos(a + b*x))**(S(11)/2)) - S(6)*c*(c*sin(a + b*x))**(S(3)/2)/(S(77)*b*d**S(3)*(d*cos(a + b*x))**(S(7)/2)) - S(8)*c*(c*sin(a + b*x))**(S(3)/2)/(S(77)*b*d**S(5)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)/(d*cos(a + b*x))**(S(17)/2), x), x, S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(15)*b*d*(d*cos(a + b*x))**(S(15)/2)) - S(2)*c*(c*sin(a + b*x))**(S(3)/2)/(S(55)*b*d**S(3)*(d*cos(a + b*x))**(S(11)/2)) - S(16)*c*(c*sin(a + b*x))**(S(3)/2)/(S(385)*b*d**S(5)*(d*cos(a + b*x))**(S(7)/2)) - S(64)*c*(c*sin(a + b*x))**(S(3)/2)/(S(1155)*b*d**S(7)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(7)/2)/cos(a + b*x)**(S(7)/2), x), x, sqrt(S(2))*ArcTan(S(1) - sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) - sqrt(S(2))*ArcTan(S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) - sqrt(S(2))*log(cot(a + b*x) + S(1) - sqrt(S(2))*sqrt(cos(a + 
b*x))/sqrt(sin(a + b*x)))/(S(4)*b) + sqrt(S(2))*log(cot(a + b*x) + S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(4)*b) + S(2)*sin(a + b*x)**(S(5)/2)/(S(5)*b*cos(a + b*x)**(S(5)/2)) - S(2)*sqrt(sin(a + b*x))/(b*sqrt(cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(x)**(S(3)/2)/cos(x)**(S(7)/2), x), x, S(2)*sin(x)**(S(5)/2)/(S(5)*cos(x)**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(sin(x))/sqrt(cos(x)), x), x, -sqrt(S(2))*ArcTan(-sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + S(1))/S(2) + sqrt(S(2))*ArcTan(sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + S(1))/S(2) + sqrt(S(2))*log(-sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + tan(x) + S(1))/S(4) - sqrt(S(2))*log(sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + tan(x) + S(1))/S(4), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(x)**(S(5)/2)/sqrt(cos(x)), x), x, -S(3)*sqrt(S(2))*ArcTan(-sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + S(1))/S(8) + S(3)*sqrt(S(2))*ArcTan(sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + S(1))/S(8) + S(3)*sqrt(S(2))*log(-sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + tan(x) + S(1))/S(16) - S(3)*sqrt(S(2))*log(sqrt(S(2))*sqrt(sin(x))/sqrt(cos(x)) + tan(x) + S(1))/S(16) - sin(x)**(S(3)/2)*sqrt(cos(x))/S(2), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(7)/2)/sqrt(c*sin(a + b*x)), x), x, S(5)*d**S(4)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(12)*b*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + S(5)*d**S(3)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))/(S(6)*b*c) + d*sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(5)/2)/(S(3)*b*c), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**(S(3)/2)/sqrt(c*sin(a + b*x)), x), x, d**S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(2)*b*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + d*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))/(b*c), expand=True, _diff=True, 
_numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))), x), x, EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(b*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(5)/2)), x), x, S(2)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(3)*b*d**S(2)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + S(2)*sqrt(c*sin(a + b*x))/(S(3)*b*c*d*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(9)/2)), x), x, S(4)*EllipticF(-Pi/S(4) + a + b*x, S(2))*sqrt(sin(S(2)*a + S(2)*b*x))/(S(7)*b*d**S(4)*sqrt(c*sin(a + b*x))*sqrt(d*cos(a + b*x))) + S(2)*sqrt(c*sin(a + b*x))/(S(7)*b*c*d*(d*cos(a + b*x))**(S(7)/2)) + S(4)*sqrt(c*sin(a + b*x))/(S(7)*b*c*d**S(3)*(d*cos(a + b*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)), x), x, sqrt(S(2))*sqrt(d)*ArcTan(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(2)*b*sqrt(c)) - sqrt(S(2))*sqrt(d)*ArcTan(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/(sqrt(d)*sqrt(c*sin(a + b*x))) + S(1))/(S(2)*b*sqrt(c)) - sqrt(S(2))*sqrt(d)*log(-sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(4)*b*sqrt(c)) + sqrt(S(2))*sqrt(d)*log(sqrt(S(2))*sqrt(c)*sqrt(d*cos(a + b*x))/sqrt(c*sin(a + b*x)) + sqrt(d)*cot(a + b*x) + sqrt(d))/(S(4)*b*sqrt(c)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(3)/2)), x), x, S(2)*sqrt(c*sin(a + b*x))/(b*c*d*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(7)/2)), x), x, S(2)*sqrt(c*sin(a + b*x))/(S(5)*b*c*d*(d*cos(a + 
b*x))**(S(5)/2)) + S(8)*sqrt(c*sin(a + b*x))/(S(5)*b*c*d**S(3)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(S(11)/2)), x), x, S(2)*sqrt(c*sin(a + b*x))/(S(9)*b*c*d*(d*cos(a + b*x))**(S(9)/2)) + S(16)*sqrt(c*sin(a + b*x))/(S(45)*b*c*d**S(3)*(d*cos(a + b*x))**(S(5)/2)) + S(64)*sqrt(c*sin(a + b*x))/(S(45)*b*c*d**S(5)*sqrt(d*cos(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(cos(a + b*x))/sqrt(sin(a + b*x)), x), x, sqrt(S(2))*ArcTan(S(1) - sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) - sqrt(S(2))*ArcTan(S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) - sqrt(S(2))*log(cot(a + b*x) + S(1) - sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(4)*b) + sqrt(S(2))*log(cot(a + b*x) + S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(3)/2)/sin(a + b*x)**(S(3)/2), x), x, sqrt(S(2))*ArcTan(-sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + S(1))/(S(2)*b) - sqrt(S(2))*ArcTan(sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + S(1))/(S(2)*b) - sqrt(S(2))*log(-sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + tan(a + b*x) + S(1))/(S(4)*b) + sqrt(S(2))*log(sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + tan(a + b*x) + S(1))/(S(4)*b) - S(2)*sqrt(cos(a + b*x))/(b*sqrt(sin(a + b*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(5)/2)/sin(a + b*x)**(S(5)/2), x), x, -sqrt(S(2))*ArcTan(S(1) - sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) + sqrt(S(2))*ArcTan(S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(2)*b) + sqrt(S(2))*log(cot(a + b*x) + S(1) - sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(4)*b) - sqrt(S(2))*log(cot(a + b*x) + S(1) + sqrt(S(2))*sqrt(cos(a + b*x))/sqrt(sin(a + b*x)))/(S(4)*b) - S(2)*cos(a + 
b*x)**(S(3)/2)/(S(3)*b*sin(a + b*x)**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(7)/2)/sin(a + b*x)**(S(7)/2), x), x, -sqrt(S(2))*ArcTan(-sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + S(1))/(S(2)*b) + sqrt(S(2))*ArcTan(sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + S(1))/(S(2)*b) + sqrt(S(2))*log(-sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + tan(a + b*x) + S(1))/(S(4)*b) - sqrt(S(2))*log(sqrt(S(2))*sqrt(sin(a + b*x))/sqrt(cos(a + b*x)) + tan(a + b*x) + S(1))/(S(4)*b) + S(2)*sqrt(cos(a + b*x))/(b*sqrt(sin(a + b*x))) - S(2)*cos(a + b*x)**(S(5)/2)/(S(5)*b*sin(a + b*x)**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(1)/3)*cos(e + f*x)**S(4), x), x, S(3)*(b*sin(e + f*x))**(S(4)/3)*Hypergeometric2F1(S(-3)/2, S(2)/3, S(5)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(4)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(1)/3)*cos(e + f*x)**S(2), x), x, S(3)*(b*sin(e + f*x))**(S(4)/3)*Hypergeometric2F1(S(-1)/2, S(2)/3, S(5)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(4)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(1)/3), x), x, S(3)*(b*sin(e + f*x))**(S(4)/3)*Hypergeometric2F1(S(1)/2, S(2)/3, S(5)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(4)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(1)/3)*sec(e + f*x)**S(2), x), x, S(3)*(b*sin(e + f*x))**(S(4)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(2)/3, S(3)/2, S(5)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(4)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(1)/3)*sec(e + f*x)**S(4), x), x, S(3)*(b*sin(e + f*x))**(S(4)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(2)/3, S(5)/2, S(5)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(4)*b*f), 
expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(5)/3)*cos(e + f*x)**S(4), x), x, S(3)*(b*sin(e + f*x))**(S(8)/3)*Hypergeometric2F1(S(-3)/2, S(4)/3, S(7)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(8)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(5)/3)*cos(e + f*x)**S(2), x), x, S(3)*(b*sin(e + f*x))**(S(8)/3)*Hypergeometric2F1(S(-1)/2, S(4)/3, S(7)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(8)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(5)/3), x), x, S(3)*(b*sin(e + f*x))**(S(8)/3)*Hypergeometric2F1(S(1)/2, S(4)/3, S(7)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(8)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(5)/3)*sec(e + f*x)**S(2), x), x, S(3)*(b*sin(e + f*x))**(S(8)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(4)/3, S(3)/2, S(7)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(8)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(5)/3)*sec(e + f*x)**S(4), x), x, S(3)*(b*sin(e + f*x))**(S(8)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(4)/3, S(5)/2, S(7)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(8)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(e + f*x)**S(4)/(b*sin(e + f*x))**(S(1)/3), x), x, S(3)*(b*sin(e + f*x))**(S(2)/3)*Hypergeometric2F1(S(-3)/2, S(1)/3, S(4)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(e + f*x)**S(2)/(b*sin(e + f*x))**(S(1)/3), x), x, S(3)*(b*sin(e + f*x))**(S(2)/3)*Hypergeometric2F1(S(-1)/2, S(1)/3, S(4)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(-1)/3), x), x, 
S(3)*(b*sin(e + f*x))**(S(2)/3)*Hypergeometric2F1(S(1)/3, S(1)/2, S(4)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(e + f*x)**S(2)/(b*sin(e + f*x))**(S(1)/3), x), x, S(3)*(b*sin(e + f*x))**(S(2)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(1)/3, S(3)/2, S(4)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(2)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(e + f*x)**S(4)/(b*sin(e + f*x))**(S(1)/3), x), x, S(3)*(b*sin(e + f*x))**(S(2)/3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(1)/3, S(5)/2, S(4)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(2)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(e + f*x)**S(4)/(b*sin(e + f*x))**(S(5)/3), x), x, -S(3)*Hypergeometric2F1(S(-3)/2, S(-1)/3, S(2)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*(b*sin(e + f*x))**(S(2)/3)*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(e + f*x)**S(2)/(b*sin(e + f*x))**(S(5)/3), x), x, -S(3)*Hypergeometric2F1(S(-1)/2, S(-1)/3, S(2)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*(b*sin(e + f*x))**(S(2)/3)*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**(S(-5)/3), x), x, -S(3)*Hypergeometric2F1(S(-1)/3, S(1)/2, S(2)/3, sin(e + f*x)**S(2))*cos(e + f*x)/(S(2)*b*f*(b*sin(e + f*x))**(S(2)/3)*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(e + f*x)**S(2)/(b*sin(e + f*x))**(S(5)/3), x), x, -S(3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(-1)/3, S(3)/2, S(2)/3, sin(e + f*x)**S(2))*sec(e + f*x)/(S(2)*b*f*(b*sin(e + f*x))**(S(2)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sec(e + f*x)**S(4)/(b*sin(e + f*x))**(S(5)/3), x), x, -S(3)*sqrt(cos(e + f*x)**S(2))*Hypergeometric2F1(S(-1)/3, S(5)/2, S(2)/3, sin(e + f*x)**S(2))*sec(e + 
f*x)/(S(2)*b*f*(b*sin(e + f*x))**(S(2)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3), x), x, -sqrt(S(3))*ArcTan(sqrt(S(3))*(-S(2)*sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/S(3))/(S(2)*b) - log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(2)*b) + log(sin(a + b*x)**(S(4)/3)/cos(a + b*x)**(S(4)/3) - sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3), x), x, ArcTan(sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3))/b - ArcTan(-S(2)*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + sqrt(S(3)))/(S(2)*b) + ArcTan(S(2)*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + sqrt(S(3)))/(S(2)*b) + sqrt(S(3))*log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) - sqrt(S(3))*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + S(1))/(S(4)*b) - sqrt(S(3))*log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + sqrt(S(3))*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + S(1))/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(4)/3)/cos(a + b*x)**(S(4)/3), x), x, ArcTan(cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/b - ArcTan(sqrt(S(3)) - S(2)*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/(S(2)*b) + ArcTan(sqrt(S(3)) + S(2)*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/(S(2)*b) + sqrt(S(3))*log(S(1) - sqrt(S(3))*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(4)*b) - sqrt(S(3))*log(S(1) + sqrt(S(3))*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(4)*b) + S(3)*sin(a + b*x)**(S(1)/3)/(b*cos(a + b*x)**(S(1)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(5)/3)/cos(a + b*x)**(S(5)/3), x), x, -sqrt(S(3))*ArcTan(sqrt(S(3))*(S(1) - S(2)*cos(a + b*x)**(S(2)/3)/sin(a + 
b*x)**(S(2)/3))/S(3))/(S(2)*b) - log(S(1) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(2)*b) + log(S(1) - cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3) + cos(a + b*x)**(S(4)/3)/sin(a + b*x)**(S(4)/3))/(S(4)*b) + S(3)*sin(a + b*x)**(S(2)/3)/(S(2)*b*cos(a + b*x)**(S(2)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(a + b*x)**(S(7)/3)/cos(a + b*x)**(S(7)/3), x), x, sqrt(S(3))*ArcTan(sqrt(S(3))*(-S(2)*sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/S(3))/(S(2)*b) + log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(2)*b) - log(sin(a + b*x)**(S(4)/3)/cos(a + b*x)**(S(4)/3) - sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(4)*b) + S(3)*sin(a + b*x)**(S(4)/3)/(S(4)*b*cos(a + b*x)**(S(4)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3), x), x, sqrt(S(3))*ArcTan(sqrt(S(3))*(S(1) - S(2)*cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/S(3))/(S(2)*b) + log(S(1) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(2)*b) - log(S(1) - cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3) + cos(a + b*x)**(S(4)/3)/sin(a + b*x)**(S(4)/3))/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3), x), x, -ArcTan(cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/b + ArcTan(sqrt(S(3)) - S(2)*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/(S(2)*b) - ArcTan(sqrt(S(3)) + S(2)*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3))/(S(2)*b) - sqrt(S(3))*log(S(1) - sqrt(S(3))*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(4)*b) + sqrt(S(3))*log(S(1) + sqrt(S(3))*cos(a + b*x)**(S(1)/3)/sin(a + b*x)**(S(1)/3) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(4)*b), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(4)/3)/sin(a + b*x)**(S(4)/3), x), x, -ArcTan(sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3))/b + 
ArcTan(-S(2)*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + sqrt(S(3)))/(S(2)*b) - ArcTan(S(2)*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + sqrt(S(3)))/(S(2)*b) - sqrt(S(3))*log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) - sqrt(S(3))*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + S(1))/(S(4)*b) + sqrt(S(3))*log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + sqrt(S(3))*sin(a + b*x)**(S(1)/3)/cos(a + b*x)**(S(1)/3) + S(1))/(S(4)*b) - S(3)*cos(a + b*x)**(S(1)/3)/(b*sin(a + b*x)**(S(1)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(5)/3)/sin(a + b*x)**(S(5)/3), x), x, sqrt(S(3))*ArcTan(sqrt(S(3))*(-S(2)*sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/S(3))/(S(2)*b) + log(sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(2)*b) - log(sin(a + b*x)**(S(4)/3)/cos(a + b*x)**(S(4)/3) - sin(a + b*x)**(S(2)/3)/cos(a + b*x)**(S(2)/3) + S(1))/(S(4)*b) - S(3)*cos(a + b*x)**(S(2)/3)/(S(2)*b*sin(a + b*x)**(S(2)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(a + b*x)**(S(7)/3)/sin(a + b*x)**(S(7)/3), x), x, -sqrt(S(3))*ArcTan(sqrt(S(3))*(S(1) - S(2)*cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/S(3))/(S(2)*b) - log(S(1) + cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3))/(S(2)*b) + log(S(1) - cos(a + b*x)**(S(2)/3)/sin(a + b*x)**(S(2)/3) + cos(a + b*x)**(S(4)/3)/sin(a + b*x)**(S(4)/3))/(S(4)*b) - S(3)*cos(a + b*x)**(S(4)/3)/(S(4)*b*sin(a + b*x)**(S(4)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(cos(x)**(S(2)/3)/sin(x)**(S(8)/3), x), x, -S(3)*cos(x)**(S(5)/3)/(S(5)*sin(x)**(S(5)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(x)**(S(2)/3)/cos(x)**(S(8)/3), x), x, S(3)*sin(x)**(S(5)/3)/(S(5)*cos(x)**(S(5)/3)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**m*cos(e + f*x)**n, x), x, (cos(e + f*x)**S(2))**(-n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, -n/S(2) + S(1)/2, 
m/S(2) + S(3)/2, sin(e + f*x)**S(2))*sin(e + f*x)**(m + S(1))*cos(e + f*x)**(n + S(-1))/(f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(e + f*x))**n*sin(e + f*x)**m, x), x, -(d*cos(e + f*x))**(n + S(1))*(sin(e + f*x)**S(2))**(-m/S(2) + S(1)/2)*Hypergeometric2F1(-m/S(2) + S(1)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)**(m + S(-1))/(d*f*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**m*cos(e + f*x)**n, x), x, (b*sin(e + f*x))**(m + S(1))*(cos(e + f*x)**S(2))**(-n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, -n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))*cos(e + f*x)**(n + S(-1))/(b*f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sin(e + f*x))**m*(d*cos(e + f*x))**n, x), x, d*(b*sin(e + f*x))**(m + S(1))*(d*cos(e + f*x))**(n + S(-1))*(cos(e + f*x)**S(2))**(-n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, -n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))/(b*f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*cos(a + b*x)**S(5), x), x, (c*sin(a + b*x))**(m + S(1))/(b*c*(m + S(1))) - S(2)*(c*sin(a + b*x))**(m + S(3))/(b*c**S(3)*(m + S(3))) + (c*sin(a + b*x))**(m + S(5))/(b*c**S(5)*(m + S(5))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*cos(a + b*x)**S(3), x), x, (c*sin(a + b*x))**(m + S(1))/(b*c*(m + S(1))) - (c*sin(a + b*x))**(m + S(3))/(b*c**S(3)*(m + S(3))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*cos(a + b*x), x), x, (c*sin(a + b*x))**(m + S(1))/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sec(a + b*x), x), x, (c*sin(a + b*x))**(m + S(1))*Hypergeometric2F1(S(1), m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*(m + S(1))), expand=True, 
_diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sec(a + b*x)**S(3), x), x, (c*sin(a + b*x))**(m + S(1))*Hypergeometric2F1(S(2), m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*cos(a + b*x)**S(4), x), x, (c*sin(a + b*x))**(m + S(1))*Hypergeometric2F1(S(-3)/2, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*(m + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*cos(a + b*x)**S(2), x), x, (c*sin(a + b*x))**(m + S(1))*Hypergeometric2F1(S(-1)/2, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*(m + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m, x), x, (c*sin(a + b*x))**(m + S(1))*Hypergeometric2F1(S(1)/2, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*(m + S(1))*sqrt(cos(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sec(a + b*x)**S(2), x), x, (c*sin(a + b*x))**(m + S(1))*sqrt(cos(a + b*x)**S(2))*Hypergeometric2F1(S(3)/2, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*sec(a + b*x)/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sec(a + b*x)**S(4), x), x, (c*sin(a + b*x))**(m + S(1))*sqrt(cos(a + b*x)**S(2))*Hypergeometric2F1(S(5)/2, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*sec(a + b*x)/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*(d*cos(a + b*x))**(S(3)/2), x), x, d*(c*sin(a + b*x))**(m + S(1))*sqrt(d*cos(a + b*x))*Hypergeometric2F1(S(-1)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*(m + S(1))*(cos(a + b*x)**S(2))**(S(1)/4)), expand=True, _diff=True, _numerical=True) 
assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sqrt(d*cos(a + b*x)), x), x, d*(c*sin(a + b*x))**(m + S(1))*(cos(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(1)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*sqrt(d*cos(a + b*x))*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m/sqrt(d*cos(a + b*x)), x), x, d*(c*sin(a + b*x))**(m + S(1))*(cos(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(3)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*(d*cos(a + b*x))**(S(3)/2)*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m/(d*cos(a + b*x))**(S(3)/2), x), x, (c*sin(a + b*x))**(m + S(1))*(cos(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(5)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*d*sqrt(d*cos(a + b*x))*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m/(d*cos(a + b*x))**(S(5)/2), x), x, (c*sin(a + b*x))**(m + S(1))*(cos(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(7)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*d*(d*cos(a + b*x))**(S(3)/2)*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*sin(a + b*x)**S(5), x), x, -(d*cos(a + b*x))**(n + S(1))/(b*d*(n + S(1))) + S(2)*(d*cos(a + b*x))**(n + S(3))/(b*d**S(3)*(n + S(3))) - (d*cos(a + b*x))**(n + S(5))/(b*d**S(5)*(n + S(5))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*sin(a + b*x)**S(3), x), x, -(d*cos(a + b*x))**(n + S(1))/(b*d*(n + S(1))) + (d*cos(a + b*x))**(n + S(3))/(b*d**S(3)*(n + S(3))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*sin(a + b*x), x), x, -(d*cos(a + b*x))**(n + S(1))/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*csc(a + b*x), x), x, -(d*cos(a + 
b*x))**(n + S(1))*Hypergeometric2F1(S(1), n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*csc(a + b*x)**S(3), x), x, -(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(2), n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*csc(a + b*x)**S(5), x), x, -(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(3), n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*sin(a + b*x)**S(4), x), x, -(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(-3)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))*sin(a + b*x)/(b*d*(n + S(1))*sqrt(sin(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*sin(a + b*x)**S(2), x), x, -(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(-1)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))*sin(a + b*x)/(b*d*(n + S(1))*sqrt(sin(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n, x), x, -(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(1)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))*sin(a + b*x)/(b*d*(n + S(1))*sqrt(sin(a + b*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*csc(a + b*x)**S(2), x), x, -(d*cos(a + b*x))**(n + S(1))*sqrt(sin(a + b*x)**S(2))*Hypergeometric2F1(S(3)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))*csc(a + b*x)/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n*csc(a + b*x)**S(4), x), x, -(d*cos(a + b*x))**(n + S(1))*sqrt(sin(a + b*x)**S(2))*Hypergeometric2F1(S(5)/2, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + 
b*x)**S(2))*csc(a + b*x)/(b*d*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(5)/2)*(d*cos(a + b*x))**n, x), x, -c*(c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(-3)/4, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(n + S(1))*(sin(a + b*x)**S(2))**(S(3)/4)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**(S(3)/2)*(d*cos(a + b*x))**n, x), x, -c*sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**(n + S(1))*Hypergeometric2F1(S(-1)/4, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(n + S(1))*(sin(a + b*x)**S(2))**(S(1)/4)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(c*sin(a + b*x))*(d*cos(a + b*x))**n, x), x, -c*(d*cos(a + b*x))**(n + S(1))*(sin(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(1)/4, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*sqrt(c*sin(a + b*x))*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n/sqrt(c*sin(a + b*x)), x), x, -c*(d*cos(a + b*x))**(n + S(1))*(sin(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(3)/4, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*d*(c*sin(a + b*x))**(S(3)/2)*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*cos(a + b*x))**n/(c*sin(a + b*x))**(S(3)/2), x), x, -(d*cos(a + b*x))**(n + S(1))*(sin(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(5)/4, n/S(2) + S(1)/2, n/S(2) + S(3)/2, cos(a + b*x)**S(2))/(b*c*d*sqrt(c*sin(a + b*x))*(n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x)**S(7), x), x, S(2)*b**S(7)/(S(13)*f*(b*sec(e + f*x))**(S(13)/2)) - S(2)*b**S(5)/(S(3)*f*(b*sec(e + f*x))**(S(9)/2)) + S(6)*b**S(3)/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)) - S(2)*b/(f*sqrt(b*sec(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + 
f*x))*sin(e + f*x)**S(5), x), x, -S(2)*b**S(5)/(S(9)*f*(b*sec(e + f*x))**(S(9)/2)) + S(4)*b**S(3)/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)) - S(2)*b/(f*sqrt(b*sec(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x)**S(3), x), x, S(2)*b**S(3)/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)) - S(2)*b/(f*sqrt(b*sec(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x), x), x, -S(2)*b/(f*sqrt(b*sec(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x), x), x, sqrt(b)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/f - sqrt(b)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x)**S(3), x), x, S(3)*sqrt(b)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) - S(3)*sqrt(b)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) - (b*sec(e + f*x))**(S(3)/2)*cot(e + f*x)**S(2)/(S(2)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x)**S(5), x), x, S(21)*sqrt(b)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*f) - S(21)*sqrt(b)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*f) - S(7)*(b*sec(e + f*x))**(S(3)/2)*cot(e + f*x)**S(2)/(S(16)*b*f) - (b*sec(e + f*x))**(S(7)/2)*cot(e + f*x)**S(4)/(S(4)*b**S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x)**S(6), x), x, -S(2)*b*sin(e + f*x)**S(5)/(S(11)*f*sqrt(b*sec(e + f*x))) - S(20)*b*sin(e + f*x)**S(3)/(S(77)*f*sqrt(b*sec(e + f*x))) - S(40)*b*sin(e + f*x)/(S(77)*f*sqrt(b*sec(e + f*x))) + S(80)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(77)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x)**S(4), x), x, -S(2)*b*sin(e + f*x)**S(3)/(S(7)*f*sqrt(b*sec(e + f*x))) - S(4)*b*sin(e + 
f*x)/(S(7)*f*sqrt(b*sec(e + f*x))) + S(8)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(7)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*sin(e + f*x)**S(2), x), x, -S(2)*b*sin(e + f*x)/(S(3)*f*sqrt(b*sec(e + f*x))) + S(4)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x)), x), x, S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x)**S(2), x), x, -b*csc(e + f*x)/(f*sqrt(b*sec(e + f*x))) + sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x)**S(4), x), x, -b*csc(e + f*x)**S(3)/(S(3)*f*sqrt(b*sec(e + f*x))) - S(5)*b*csc(e + f*x)/(S(6)*f*sqrt(b*sec(e + f*x))) + S(5)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(6)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(b*sec(e + f*x))*csc(e + f*x)**S(6), x), x, -b*csc(e + f*x)**S(5)/(S(5)*f*sqrt(b*sec(e + f*x))) - S(3)*b*csc(e + f*x)**S(3)/(S(10)*f*sqrt(b*sec(e + f*x))) - S(3)*b*csc(e + f*x)/(S(4)*f*sqrt(b*sec(e + f*x))) + S(3)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(4)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(7), x), x, S(2)*b**S(7)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) - S(6)*b**S(5)/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)) + S(2)*b**S(3)/(f*(b*sec(e + f*x))**(S(3)/2)) + S(2)*b*sqrt(b*sec(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(5), x), x, -S(2)*b**S(5)/(S(7)*f*(b*sec(e + 
f*x))**(S(7)/2)) + S(4)*b**S(3)/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)) + S(2)*b*sqrt(b*sec(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(3), x), x, S(2)*b**S(3)/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)) + S(2)*b*sqrt(b*sec(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x), x), x, S(2)*b*sqrt(b*sec(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*csc(e + f*x), x), x, -b**(S(3)/2)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/f - b**(S(3)/2)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/f + S(2)*b*sqrt(b*sec(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)**S(3), x), x, -S(5)*b**(S(3)/2)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) - S(5)*b**(S(3)/2)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) + S(5)*b*sqrt(b*sec(e + f*x))/(S(2)*f) - (b*sec(e + f*x))**(S(5)/2)*cot(e + f*x)**S(2)/(S(2)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(6), x), x, S(20)*b**S(3)*sin(e + f*x)**S(3)/(S(9)*f*(b*sec(e + f*x))**(S(3)/2)) + S(8)*b**S(3)*sin(e + f*x)/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)) - S(16)*b**S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(3)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(2)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)**S(5)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(4), x), x, S(12)*b**S(3)*sin(e + f*x)/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)) - S(24)*b**S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(2)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)**S(3)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(2), x), x, -S(4)*b**S(2)*EllipticE(e/S(2) + 
f*x/S(2), S(2))/(f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(2)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2), x), x, -S(2)*b**S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(2)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)**S(2), x), x, -S(3)*b**S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(3)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)/f - b*sqrt(b*sec(e + f*x))*csc(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)**S(4), x), x, -S(7)*b**S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))) + S(7)*b*sqrt(b*sec(e + f*x))*sin(e + f*x)/(S(2)*f) - b*sqrt(b*sec(e + f*x))*csc(e + f*x)**S(3)/(S(3)*f) - S(7)*b*sqrt(b*sec(e + f*x))*csc(e + f*x)/(S(6)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(7), x), x, S(2)*b**S(7)/(S(9)*f*(b*sec(e + f*x))**(S(9)/2)) - S(6)*b**S(5)/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)) + S(6)*b**S(3)/(f*sqrt(b*sec(e + f*x))) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(5), x), x, -S(2)*b**S(5)/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)) + S(4)*b**S(3)/(f*sqrt(b*sec(e + f*x))) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(3), x), x, S(2)*b**S(3)/(f*sqrt(b*sec(e + f*x))) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x), x), x, S(2)*b*(b*sec(e + 
f*x))**(S(3)/2)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*csc(e + f*x), x), x, b**(S(5)/2)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/f - b**(S(5)/2)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/f + S(2)*b*(b*sec(e + f*x))**(S(3)/2)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*csc(e + f*x)**S(3), x), x, S(7)*b**(S(5)/2)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) - S(7)*b**(S(5)/2)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*f) + S(7)*b*(b*sec(e + f*x))**(S(3)/2)/(S(6)*f) - (b*sec(e + f*x))**(S(7)/2)*cot(e + f*x)**S(2)/(S(2)*b*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*csc(e + f*x)**S(5), x), x, S(77)*b**(S(5)/2)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*f) - S(77)*b**(S(5)/2)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*f) + S(77)*b*(b*sec(e + f*x))**(S(3)/2)/(S(48)*f) - S(11)*(b*sec(e + f*x))**(S(7)/2)*cot(e + f*x)**S(2)/(S(16)*b*f) - (b*sec(e + f*x))**(S(11)/2)*cot(e + f*x)**S(4)/(S(4)*b**S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(6), x), x, S(20)*b**S(3)*sin(e + f*x)**S(3)/(S(21)*f*sqrt(b*sec(e + f*x))) + S(40)*b**S(3)*sin(e + f*x)/(S(21)*f*sqrt(b*sec(e + f*x))) - S(80)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(21)*f) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(5)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(4), x), x, S(4)*b**S(3)*sin(e + f*x)/(S(3)*f*sqrt(b*sec(e + f*x))) - S(8)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*f) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**S(3)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*sin(e + f*x)**S(2), x), x, 
-S(4)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*f) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2), x), x, S(2)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*f) + S(2)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*csc(e + f*x)**S(2), x), x, S(5)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*f) + S(5)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)/(S(3)*f) - b*(b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(5)/2)*csc(e + f*x)**S(4), x), x, S(5)*b**S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(2)*f) + S(5)*b*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)/(S(2)*f) - b*(b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)**S(3)/(S(3)*f) - S(3)*b*(b*sec(e + f*x))**(S(3)/2)*csc(e + f*x)/(S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(7)/sqrt(b*sec(e + f*x)), x), x, S(2)*b**S(7)/(S(15)*f*(b*sec(e + f*x))**(S(15)/2)) - S(6)*b**S(5)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) + S(6)*b**S(3)/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)) - S(2)*b/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(5)/sqrt(b*sec(e + f*x)), x), x, -S(2)*b**S(5)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) + S(4)*b**S(3)/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)) - S(2)*b/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(3)/sqrt(b*sec(e + f*x)), x), x, S(2)*b**S(3)/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)) - S(2)*b/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)), expand=True, _diff=True, 
_numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)/sqrt(b*sec(e + f*x)), x), x, -S(2)*b/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)/sqrt(b*sec(e + f*x)), x), x, -ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(sqrt(b)*f) - atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(sqrt(b)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(3)/sqrt(b*sec(e + f*x)), x), x, -sqrt(b*sec(e + f*x))*cot(e + f*x)**S(2)/(S(2)*b*f) - ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*sqrt(b)*f) - atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*sqrt(b)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(5)/sqrt(b*sec(e + f*x)), x), x, -S(5)*sqrt(b*sec(e + f*x))*cot(e + f*x)**S(2)/(S(16)*b*f) - (b*sec(e + f*x))**(S(5)/2)*cot(e + f*x)**S(4)/(S(4)*b**S(3)*f) - S(5)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*sqrt(b)*f) - S(5)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*sqrt(b)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(6)/sqrt(b*sec(e + f*x)), x), x, -S(2)*b*sin(e + f*x)**S(5)/(S(13)*f*(b*sec(e + f*x))**(S(3)/2)) - S(20)*b*sin(e + f*x)**S(3)/(S(117)*f*(b*sec(e + f*x))**(S(3)/2)) - S(8)*b*sin(e + f*x)/(S(39)*f*(b*sec(e + f*x))**(S(3)/2)) + S(16)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(39)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(4)/sqrt(b*sec(e + f*x)), x), x, -S(2)*b*sin(e + f*x)**S(3)/(S(9)*f*(b*sec(e + f*x))**(S(3)/2)) - S(4)*b*sin(e + f*x)/(S(15)*f*(b*sec(e + f*x))**(S(3)/2)) + S(8)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(15)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(2)/sqrt(b*sec(e + f*x)), x), x, -S(2)*b*sin(e + f*x)/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)) + S(4)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(b*sec(e + 
f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(b*sec(e + f*x)), x), x, S(2)*EllipticE(e/S(2) + f*x/S(2), S(2))/(f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(2)/sqrt(b*sec(e + f*x)), x), x, -b*csc(e + f*x)/(f*(b*sec(e + f*x))**(S(3)/2)) - EllipticE(e/S(2) + f*x/S(2), S(2))/(f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(4)/sqrt(b*sec(e + f*x)), x), x, -b*csc(e + f*x)**S(3)/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)) - b*csc(e + f*x)/(S(2)*f*(b*sec(e + f*x))**(S(3)/2)) - EllipticE(e/S(2) + f*x/S(2), S(2))/(S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(6)/sqrt(b*sec(e + f*x)), x), x, -b*csc(e + f*x)**S(5)/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)) - S(7)*b*csc(e + f*x)**S(3)/(S(30)*f*(b*sec(e + f*x))**(S(3)/2)) - S(7)*b*csc(e + f*x)/(S(20)*f*(b*sec(e + f*x))**(S(3)/2)) - S(7)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(20)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(7)/(b*sec(e + f*x))**(S(3)/2), x), x, S(2)*b**S(7)/(S(17)*f*(b*sec(e + f*x))**(S(17)/2)) - S(6)*b**S(5)/(S(13)*f*(b*sec(e + f*x))**(S(13)/2)) + S(2)*b**S(3)/(S(3)*f*(b*sec(e + f*x))**(S(9)/2)) - S(2)*b/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(5)/(b*sec(e + f*x))**(S(3)/2), x), x, -S(2)*b**S(5)/(S(13)*f*(b*sec(e + f*x))**(S(13)/2)) + S(4)*b**S(3)/(S(9)*f*(b*sec(e + f*x))**(S(9)/2)) - S(2)*b/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(3)/(b*sec(e + f*x))**(S(3)/2), x), x, S(2)*b**S(3)/(S(9)*f*(b*sec(e + f*x))**(S(9)/2)) - S(2)*b/(S(5)*f*(b*sec(e + 
f*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)/(b*sec(e + f*x))**(S(3)/2), x), x, -S(2)*b/(S(5)*f*(b*sec(e + f*x))**(S(5)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)/(b*sec(e + f*x))**(S(3)/2), x), x, S(2)/(b*f*sqrt(b*sec(e + f*x))) + ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(b**(S(3)/2)*f) - atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(b**(S(3)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(3)/(b*sec(e + f*x))**(S(3)/2), x), x, -(b*sec(e + f*x))**(S(3)/2)*cot(e + f*x)**S(2)/(S(2)*b**S(3)*f) - ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*b**(S(3)/2)*f) + atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*b**(S(3)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(5)/(b*sec(e + f*x))**(S(3)/2), x), x, -(b*sec(e + f*x))**(S(3)/2)*cot(e + f*x)**S(4)/(S(4)*b**S(3)*f) - S(3)*(b*sec(e + f*x))**(S(3)/2)*cot(e + f*x)**S(2)/(S(16)*b**S(3)*f) - S(3)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*b**(S(3)/2)*f) + S(3)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*b**(S(3)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(4)/(b*sec(e + f*x))**(S(3)/2), x), x, -S(2)*b*sin(e + f*x)**S(3)/(S(11)*f*(b*sec(e + f*x))**(S(5)/2)) - S(12)*b*sin(e + f*x)/(S(77)*f*(b*sec(e + f*x))**(S(5)/2)) + S(8)*sin(e + f*x)/(S(77)*b*f*sqrt(b*sec(e + f*x))) + S(8)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(77)*b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(2)/(b*sec(e + f*x))**(S(3)/2), x), x, -S(2)*b*sin(e + f*x)/(S(7)*f*(b*sec(e + f*x))**(S(5)/2)) + S(4)*sin(e + f*x)/(S(21)*b*f*sqrt(b*sec(e + f*x))) + S(4)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(21)*b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + 
f*x))**(S(-3)/2), x), x, S(2)*sin(e + f*x)/(S(3)*b*f*sqrt(b*sec(e + f*x))) + S(2)*sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(3)*b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(2)/(b*sec(e + f*x))**(S(3)/2), x), x, -csc(e + f*x)/(b*f*sqrt(b*sec(e + f*x))) - sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(4)/(b*sec(e + f*x))**(S(3)/2), x), x, -csc(e + f*x)**S(3)/(S(3)*b*f*sqrt(b*sec(e + f*x))) + csc(e + f*x)/(S(6)*b*f*sqrt(b*sec(e + f*x))) - sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(6)*b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(6)/(b*sec(e + f*x))**(S(3)/2), x), x, -csc(e + f*x)**S(5)/(S(5)*b*f*sqrt(b*sec(e + f*x))) + csc(e + f*x)**S(3)/(S(30)*b*f*sqrt(b*sec(e + f*x))) + csc(e + f*x)/(S(12)*b*f*sqrt(b*sec(e + f*x))) - sqrt(b*sec(e + f*x))*EllipticF(e/S(2) + f*x/S(2), S(2))*sqrt(cos(e + f*x))/(S(12)*b**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(7)/(b*sec(e + f*x))**(S(5)/2), x), x, S(2)*b**S(7)/(S(19)*f*(b*sec(e + f*x))**(S(19)/2)) - S(2)*b**S(5)/(S(5)*f*(b*sec(e + f*x))**(S(15)/2)) + S(6)*b**S(3)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) - S(2)*b/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(5)/(b*sec(e + f*x))**(S(5)/2), x), x, -S(2)*b**S(5)/(S(15)*f*(b*sec(e + f*x))**(S(15)/2)) + S(4)*b**S(3)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) - S(2)*b/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(3)/(b*sec(e + f*x))**(S(5)/2), x), x, S(2)*b**S(3)/(S(11)*f*(b*sec(e + f*x))**(S(11)/2)) - S(2)*b/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)), expand=True, _diff=True, 
_numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)/(b*sec(e + f*x))**(S(5)/2), x), x, -S(2)*b/(S(7)*f*(b*sec(e + f*x))**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)/(b*sec(e + f*x))**(S(5)/2), x), x, S(2)/(S(3)*b*f*(b*sec(e + f*x))**(S(3)/2)) - ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(b**(S(5)/2)*f) - atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(b**(S(5)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(3)/(b*sec(e + f*x))**(S(5)/2), x), x, -sqrt(b*sec(e + f*x))*cot(e + f*x)**S(2)/(S(2)*b**S(3)*f) + S(3)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*b**(S(5)/2)*f) + S(3)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(4)*b**(S(5)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(5)/(b*sec(e + f*x))**(S(5)/2), x), x, -sqrt(b*sec(e + f*x))*cot(e + f*x)**S(4)/(S(4)*b**S(3)*f) - sqrt(b*sec(e + f*x))*cot(e + f*x)**S(2)/(S(16)*b**S(3)*f) + S(3)*ArcTan(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*b**(S(5)/2)*f) + S(3)*atanh(sqrt(b*sec(e + f*x))/sqrt(b))/(S(32)*b**(S(5)/2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(4)/(b*sec(e + f*x))**(S(5)/2), x), x, -S(2)*b*sin(e + f*x)**S(3)/(S(13)*f*(b*sec(e + f*x))**(S(7)/2)) - S(4)*b*sin(e + f*x)/(S(39)*f*(b*sec(e + f*x))**(S(7)/2)) + S(8)*sin(e + f*x)/(S(195)*b*f*(b*sec(e + f*x))**(S(3)/2)) + S(8)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(65)*b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(2)/(b*sec(e + f*x))**(S(5)/2), x), x, -S(2)*b*sin(e + f*x)/(S(9)*f*(b*sec(e + f*x))**(S(7)/2)) + S(4)*sin(e + f*x)/(S(45)*b*f*(b*sec(e + f*x))**(S(3)/2)) + S(4)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(15)*b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**(S(-5)/2), x), x, S(2)*sin(e + 
f*x)/(S(5)*b*f*(b*sec(e + f*x))**(S(3)/2)) + S(6)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(5)*b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(2)/(b*sec(e + f*x))**(S(5)/2), x), x, -csc(e + f*x)/(b*f*(b*sec(e + f*x))**(S(3)/2)) - S(3)*EllipticE(e/S(2) + f*x/S(2), S(2))/(b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(4)/(b*sec(e + f*x))**(S(5)/2), x), x, -csc(e + f*x)**S(3)/(S(3)*b*f*(b*sec(e + f*x))**(S(3)/2)) + csc(e + f*x)/(S(2)*b*f*(b*sec(e + f*x))**(S(3)/2)) + EllipticE(e/S(2) + f*x/S(2), S(2))/(S(2)*b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(6)/(b*sec(e + f*x))**(S(5)/2), x), x, -csc(e + f*x)**S(5)/(S(5)*b*f*(b*sec(e + f*x))**(S(3)/2)) + csc(e + f*x)**S(3)/(S(10)*b*f*(b*sec(e + f*x))**(S(3)/2)) + S(3)*csc(e + f*x)/(S(20)*b*f*(b*sec(e + f*x))**(S(3)/2)) + S(3)*EllipticE(e/S(2) + f*x/S(2), S(2))/(S(20)*b**S(2)*f*sqrt(b*sec(e + f*x))*sqrt(cos(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**(S(9)/2)/sqrt(b*sec(e + f*x)), x), x, -b*sin(e + f*x)**(S(7)/2)/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)) - S(7)*b*sin(e + f*x)**(S(3)/2)/(S(30)*f*(b*sec(e + f*x))**(S(3)/2)) + S(7)*sqrt(b*sec(e + f*x))*EllipticE(-Pi/S(4) + e + f*x, S(2))*sqrt(sin(e + f*x))*cos(e + f*x)/(S(20)*b*f*sqrt(sin(S(2)*e + S(2)*f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**(S(5)/2)/sqrt(b*sec(e + f*x)), x), x, -b*sin(e + f*x)**(S(3)/2)/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)) + sqrt(b*sec(e + f*x))*EllipticE(-Pi/S(4) + e + f*x, S(2))*sqrt(sin(e + f*x))*cos(e + f*x)/(S(2)*b*f*sqrt(sin(S(2)*e + S(2)*f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(sin(e + f*x))/sqrt(b*sec(e + f*x)), x), x, 
sqrt(b*sec(e + f*x))*EllipticE(-Pi/S(4) + e + f*x, S(2))*sqrt(sin(e + f*x))*cos(e + f*x)/(b*f*sqrt(sin(S(2)*e + S(2)*f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(3)/2)), x), x, -S(2)*b/(f*(b*sec(e + f*x))**(S(3)/2)*sqrt(sin(e + f*x))) - S(2)*sqrt(b*sec(e + f*x))*EllipticE(-Pi/S(4) + e + f*x, S(2))*sqrt(sin(e + f*x))*cos(e + f*x)/(b*f*sqrt(sin(S(2)*e + S(2)*f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(7)/2)), x), x, -S(4)*b/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)*sqrt(sin(e + f*x))) - S(2)*b/(S(5)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(5)/2)) - S(4)*sqrt(b*sec(e + f*x))*EllipticE(-Pi/S(4) + e + f*x, S(2))*sqrt(sin(e + f*x))*cos(e + f*x)/(S(5)*b*f*sqrt(sin(S(2)*e + S(2)*f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**(S(3)/2)/sqrt(b*sec(e + f*x)), x), x, -b*sqrt(sin(e + f*x))/(S(2)*f*(b*sec(e + f*x))**(S(3)/2)) + sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*ArcTan(-sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + S(1))/(S(8)*sqrt(b)*f) - sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*ArcTan(sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + S(1))/(S(8)*sqrt(b)*f) - sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*log(-sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + cot(e + f*x) + S(1))/(S(16)*sqrt(b)*f) + sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*log(sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + cot(e + f*x) + S(1))/(S(16)*sqrt(b)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sqrt(sin(e + f*x))), x), x, sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*ArcTan(-sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + S(1))/(S(2)*sqrt(b)*f) - sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + 
f*x))*ArcTan(sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + S(1))/(S(2)*sqrt(b)*f) - sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*log(-sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + cot(e + f*x) + S(1))/(S(4)*sqrt(b)*f) + sqrt(S(2))*sqrt(cos(e + f*x)/b)*sqrt(b*sec(e + f*x))*log(sqrt(S(2))*sqrt(b)*sqrt(cos(e + f*x)/b)/sqrt(sin(e + f*x)) + cot(e + f*x) + S(1))/(S(4)*sqrt(b)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(5)/2)), x), x, -S(2)*b/(S(3)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(3)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(9)/2)), x), x, -S(8)*b/(S(21)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(3)/2)) - S(2)*b/(S(7)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(7)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(13)/2)), x), x, -S(64)*b/(S(231)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(3)/2)) - S(16)*b/(S(77)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(7)/2)) - S(2)*b/(S(11)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(11)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/(sqrt(b*sec(e + f*x))*sin(e + f*x)**(S(17)/2)), x), x, -S(256)*b/(S(1155)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(3)/2)) - S(64)*b/(S(385)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(7)/2)) - S(8)*b/(S(55)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(11)/2)) - S(2)*b/(S(15)*f*(b*sec(e + f*x))**(S(3)/2)*sin(e + f*x)**(S(15)/2)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*(d*sec(a + b*x))**(S(5)/2), x), x, d**S(2)*(c*sin(a + b*x))**(m + S(1))*sqrt(d*sec(a + b*x))*(cos(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(7)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*sec(a + b*x)/(b*c*(m + S(1))), expand=True, 
_diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*(d*sec(a + b*x))**(S(3)/2), x), x, d*(c*sin(a + b*x))**(m + S(1))*sqrt(d*sec(a + b*x))*(cos(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(5)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m*sqrt(d*sec(a + b*x)), x), x, (c*sin(a + b*x))**(m + S(1))*sqrt(d*sec(a + b*x))*(cos(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(3)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*sec(a + b*x)/(b*c*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m/sqrt(d*sec(a + b*x)), x), x, (c*sin(a + b*x))**(m + S(1))*sqrt(d*sec(a + b*x))*(cos(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(1)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))/(b*c*d*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((c*sin(a + b*x))**m/(d*sec(a + b*x))**(S(3)/2), x), x, (c*sin(a + b*x))**(m + S(1))*sqrt(d*sec(a + b*x))*Hypergeometric2F1(S(-1)/4, m/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(a + b*x)**S(2))*cos(a + b*x)/(b*c*d**S(2)*(m + S(1))*(cos(a + b*x)**S(2))**(S(1)/4)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**m*sec(e + f*x)**n, x), x, (cos(e + f*x)**S(2))**(n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))*sin(e + f*x)**(m + S(1))*sec(e + f*x)**(n + S(1))/(f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(e + f*x))**m*sec(e + f*x)**n, x), x, (a*sin(e + f*x))**(m + S(1))*(cos(e + f*x)**S(2))**(n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))*sec(e + f*x)**(n + S(1))/(a*f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**m, x), x, 
-b*(b*sec(e + f*x))**(n + S(-1))*(sin(e + f*x)**S(2))**(-m/S(2) + S(1)/2)*Hypergeometric2F1(-m/S(2) + S(1)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)**(m + S(-1))/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**m, x), x, -(b*sec(e + f*x))**n*(sin(e + f*x)**S(2))**(-m/S(2) + S(1)/2)*Hypergeometric2F1(-m/S(2) + S(1)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)**(m + S(-1))*cos(e + f*x)/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(e + f*x))**m*(b*sec(e + f*x))**n, x), x, (a*sin(e + f*x))**(m + S(1))*(b*sec(e + f*x))**(n + S(1))*(cos(e + f*x)**S(2))**(n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))/(a*b*f*(m + S(1))), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((a*sin(e + f*x))**m*(b*sec(e + f*x))**n, x), x, (a*sin(e + f*x))**(m + S(1))*(b*sec(e + f*x))**n*(cos(e + f*x)**S(2))**(n/S(2) + S(1)/2)*Hypergeometric2F1(m/S(2) + S(1)/2, n/S(2) + S(1)/2, m/S(2) + S(3)/2, sin(e + f*x)**S(2))*sec(e + f*x)/(a*f*(m + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**S(5), x), x, -b**S(5)*(b*sec(e + f*x))**(n + S(-5))/(f*(-n + S(5))) + S(2)*b**S(3)*(b*sec(e + f*x))**(n + S(-3))/(f*(-n + S(3))) - b*(b*sec(e + f*x))**(n + S(-1))/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**S(3), x), x, b**S(3)*(b*sec(e + f*x))**(n + S(-3))/(f*(-n + S(3))) - b*(b*sec(e + f*x))**(n + S(-1))/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x), x), x, -b*(b*sec(e + f*x))**(n + S(-1))/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*csc(e + f*x), x), x, -(b*sec(e + 
f*x))**(n + S(1))*Hypergeometric2F1(S(1), n/S(2) + S(1)/2, n/S(2) + S(3)/2, sec(e + f*x)**S(2))/(b*f*(n + S(1))), expand=True, _diff=True, _numerical=True) # long time assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*csc(e + f*x)**S(3), x), x, (b*sec(e + f*x))**(n + S(3))*Hypergeometric2F1(S(2), n/S(2) + S(3)/2, n/S(2) + S(5)/2, sec(e + f*x)**S(2))/(b**S(3)*f*(n + S(3))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**S(6), x), x, -(b*sec(e + f*x))**n*Hypergeometric2F1(S(-5)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)*cos(e + f*x)/(f*(-n + S(1))*sqrt(sin(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**S(4), x), x, -(b*sec(e + f*x))**n*Hypergeometric2F1(S(-3)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)*cos(e + f*x)/(f*(-n + S(1))*sqrt(sin(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*sin(e + f*x)**S(2), x), x, -(b*sec(e + f*x))**n*Hypergeometric2F1(S(-1)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)*cos(e + f*x)/(f*(-n + S(1))*sqrt(sin(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n, x), x, -b*(b*sec(e + f*x))**(n + S(-1))*Hypergeometric2F1(S(1)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*sin(e + f*x)/(f*(-n + S(1))*sqrt(sin(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*csc(e + f*x)**S(2), x), x, -(b*sec(e + f*x))**n*sqrt(sin(e + f*x)**S(2))*Hypergeometric2F1(S(3)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*cot(e + f*x)/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(e + f*x))**n*csc(e + f*x)**S(4), x), x, -(b*sec(e + f*x))**n*sqrt(sin(e + 
f*x)**S(2))*Hypergeometric2F1(S(5)/2, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(e + f*x)**S(2))*cot(e + f*x)/(f*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(a + b*x))**n*(c*sin(a + b*x))**(S(3)/2), x), x, -c*(b*sec(a + b*x))**n*sqrt(c*sin(a + b*x))*Hypergeometric2F1(S(-1)/4, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(a + b*x)**S(2))*cos(a + b*x)/(b*(-n + S(1))*(sin(a + b*x)**S(2))**(S(1)/4)), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(a + b*x))**n*sqrt(c*sin(a + b*x)), x), x, -c*(b*sec(a + b*x))**n*(sin(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(1)/4, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(a + b*x)**S(2))*cos(a + b*x)/(b*sqrt(c*sin(a + b*x))*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(a + b*x))**n/sqrt(c*sin(a + b*x)), x), x, -c*(b*sec(a + b*x))**n*(sin(a + b*x)**S(2))**(S(3)/4)*Hypergeometric2F1(S(3)/4, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(a + b*x)**S(2))*cos(a + b*x)/(b*(c*sin(a + b*x))**(S(3)/2)*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((b*sec(a + b*x))**n/(c*sin(a + b*x))**(S(3)/2), x), x, -(b*sec(a + b*x))**n*(sin(a + b*x)**S(2))**(S(1)/4)*Hypergeometric2F1(S(5)/4, -n/S(2) + S(1)/2, -n/S(2) + S(3)/2, cos(a + b*x)**S(2))*cos(a + b*x)/(b*c*sqrt(c*sin(a + b*x))*(-n + S(1))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*sin(e + f*x)**S(4), x), x, -S(2)*d**S(3)*cos(e + f*x)/(S(7)*f*(d*csc(e + f*x))**(S(5)/2)) - S(10)*d*cos(e + f*x)/(S(21)*f*sqrt(d*csc(e + f*x))) + S(10)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(21)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*sin(e + f*x)**S(3), x), x, -S(2)*d**S(2)*cos(e + f*x)/(S(5)*f*(d*csc(e + f*x))**(S(3)/2)) + S(6)*d*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(d*csc(e + 
f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*sin(e + f*x)**S(2), x), x, -S(2)*d*cos(e + f*x)/(S(3)*f*sqrt(d*csc(e + f*x))) + S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*sin(e + f*x), x), x, S(2)*d*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x)), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*csc(e + f*x), x), x, -S(2)*d*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(2)*sqrt(d*csc(e + f*x))*cos(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*csc(e + f*x)**S(2), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*f) - S(2)*(d*csc(e + f*x))**(S(3)/2)*cos(e + f*x)/(S(3)*d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sqrt(d*csc(e + f*x))*csc(e + f*x)**S(3), x), x, -S(6)*d*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(6)*sqrt(d*csc(e + f*x))*cos(e + f*x)/(S(5)*f) - S(2)*(d*csc(e + f*x))**(S(5)/2)*cos(e + f*x)/(S(5)*d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*sin(e + f*x)**S(5), x), x, -S(2)*d**S(4)*cos(e + f*x)/(S(7)*f*(d*csc(e + f*x))**(S(5)/2)) - S(10)*d**S(2)*cos(e + f*x)/(S(21)*f*sqrt(d*csc(e + f*x))) + S(10)*d*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(21)*f), expand=True, _diff=True, _numerical=True) assert 
rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*sin(e + f*x)**S(4), x), x, -S(2)*d**S(3)*cos(e + f*x)/(S(5)*f*(d*csc(e + f*x))**(S(3)/2)) + S(6)*d**S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*sin(e + f*x)**S(3), x), x, -S(2)*d**S(2)*cos(e + f*x)/(S(3)*f*sqrt(d*csc(e + f*x))) + S(2)*d*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*sin(e + f*x)**S(2), x), x, S(2)*d**S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*sin(e + f*x), x), x, S(2)*d*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2), x), x, -S(2)*d**S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(2)*d*sqrt(d*csc(e + f*x))*cos(e + f*x)/f, expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*csc(e + f*x), x), x, S(2)*d*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*f) - S(2)*(d*csc(e + f*x))**(S(3)/2)*cos(e + f*x)/(S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(3)/2)*csc(e + f*x)**S(2), x), x, -S(6)*d**S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(6)*d*sqrt(d*csc(e + f*x))*cos(e + f*x)/(S(5)*f) - S(2)*(d*csc(e + f*x))**(S(5)/2)*cos(e + f*x)/(S(5)*d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(3)/sqrt(d*csc(e + f*x)), x), 
x, -S(2)*d**S(2)*cos(e + f*x)/(S(7)*f*(d*csc(e + f*x))**(S(5)/2)) - S(10)*cos(e + f*x)/(S(21)*f*sqrt(d*csc(e + f*x))) + S(10)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(21)*d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(2)/sqrt(d*csc(e + f*x)), x), x, -S(2)*d*cos(e + f*x)/(S(5)*f*(d*csc(e + f*x))**(S(3)/2)) + S(6)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)/sqrt(d*csc(e + f*x)), x), x, -S(2)*cos(e + f*x)/(S(3)*f*sqrt(d*csc(e + f*x))) + S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(S(1)/sqrt(d*csc(e + f*x)), x), x, S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)/sqrt(d*csc(e + f*x)), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(2)/sqrt(d*csc(e + f*x)), x), x, -S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(2)*sqrt(d*csc(e + f*x))*cos(e + f*x)/(d*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(3)/sqrt(d*csc(e + f*x)), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*d*f) - S(2)*(d*csc(e + f*x))**(S(3)/2)*cos(e + f*x)/(S(3)*d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)**S(2)/(d*csc(e + f*x))**(S(3)/2), x), x, -S(2)*d*cos(e + f*x)/(S(7)*f*(d*csc(e + f*x))**(S(5)/2)) - S(10)*cos(e + f*x)/(S(21)*d*f*sqrt(d*csc(e + f*x))) + 
S(10)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(21)*d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(sin(e + f*x)/(d*csc(e + f*x))**(S(3)/2), x), x, -S(2)*cos(e + f*x)/(S(5)*f*(d*csc(e + f*x))**(S(3)/2)) + S(6)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*d*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((d*csc(e + f*x))**(S(-3)/2), x), x, -S(2)*cos(e + f*x)/(S(3)*d*f*sqrt(d*csc(e + f*x))) + S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)/(d*csc(e + f*x))**(S(3)/2), x), x, S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(d*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(2)/(d*csc(e + f*x))**(S(3)/2), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(3)/(d*csc(e + f*x))**(S(3)/2), x), x, -S(2)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(d*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(2)*sqrt(d*csc(e + f*x))*cos(e + f*x)/(d**S(2)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(4)/(d*csc(e + f*x))**(S(3)/2), x), x, S(2)*sqrt(d*csc(e + f*x))*EllipticF(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))*sqrt(sin(e + f*x))/(S(3)*d**S(2)*f) - S(2)*(d*csc(e + f*x))**(S(3)/2)*cos(e + f*x)/(S(3)*d**S(3)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate(csc(e + f*x)**S(5)/(d*csc(e + f*x))**(S(3)/2), x), x, -S(6)*EllipticE(-Pi/S(4) + e/S(2) + f*x/S(2), S(2))/(S(5)*d*f*sqrt(d*csc(e + f*x))*sqrt(sin(e + f*x))) - S(6)*sqrt(d*csc(e + f*x))*cos(e + f*x)/(S(5)*d**S(2)*f) - 
S(2)*(d*csc(e + f*x))**(S(5)/2)*cos(e + f*x)/(S(5)*d**S(4)*f), expand=True, _diff=True, _numerical=True) assert rubi_test(rubi_integrate((a*sin(e + f*x))**m*(b*csc(e + f*x))**n, x), x, (a*sin(e + f*x))**(m + S(1))*(b*csc(e + f*x))**n*Hypergeometric2F1(S(1)/2, m/S(2) - n/S(2) + S(1)/2, m/S(2) - n/S(2) + S(3)/2, sin(e + f*x)**S(2))*cos(e + f*x)/(a*f*(m - n + S(1))*sqrt(cos(e + f*x)**S(2))), expand=True, _diff=True, _numerical=True)
240.107402
1,406
0.535374
39,656
165,434
2.178283
0.015382
0.063601
0.074703
0.068811
0.918038
0.914739
0.911556
0.90718
0.899643
0.889005
0
0.049768
0.112873
165,434
688
1,407
240.456395
0.538822
0.001819
0
0.008902
0
0.001484
0.000505
0
0
0
0
0
0.796736
1
0.001484
false
0
0.032641
0
0.034125
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
ed560a4fccf0d8a26b6cf41e1e69866aa0233ac6
363
py
Python
backup_codes/generated_xor_data.py
shridharmishra4/Artificial-neural-netowrks-in-C
40f89aa3da9b2cc036df372e5bf5b6e7031d0d87
[ "Apache-2.0" ]
null
null
null
backup_codes/generated_xor_data.py
shridharmishra4/Artificial-neural-netowrks-in-C
40f89aa3da9b2cc036df372e5bf5b6e7031d0d87
[ "Apache-2.0" ]
null
null
null
backup_codes/generated_xor_data.py
shridharmishra4/Artificial-neural-netowrks-in-C
40f89aa3da9b2cc036df372e5bf5b6e7031d0d87
[ "Apache-2.0" ]
null
null
null
with open("validate.txt","w") as f: for i in range(25): f.writelines("{0} {1} {2}\n".format(0,0,0^0)) for i in range(25): f.writelines("{0} {1} {2}\n".format(0, 1, 0 ^ 1)) for i in range(25): f.writelines("{0} {1} {2}\n".format(1, 0, 1 ^ 0)) for i in range(25): f.writelines("{0} {1} {2}\n".format(1, 1, 1 ^ 1))
27.923077
57
0.482094
71
363
2.464789
0.253521
0.08
0.137143
0.251429
0.811429
0.811429
0.811429
0.811429
0.811429
0.811429
0
0.134328
0.261708
363
12
58
30.25
0.518657
0
0
0.444444
0
0
0.180055
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
9c1dc3417d4b0cae09f5728d8a9a1215e29755db
5,608
py
Python
tests/test_syntax/inline/images.py
rossant/Python-Markdown
8c0698b013edeb82586290e637df7c30ede81b5a
[ "BSD-3-Clause" ]
null
null
null
tests/test_syntax/inline/images.py
rossant/Python-Markdown
8c0698b013edeb82586290e637df7c30ede81b5a
[ "BSD-3-Clause" ]
null
null
null
tests/test_syntax/inline/images.py
rossant/Python-Markdown
8c0698b013edeb82586290e637df7c30ede81b5a
[ "BSD-3-Clause" ]
null
null
null
from markdown.test_tools import TestCase class TestAdvancedImages(TestCase): def test_nested_square_brackets(self): self.assertMarkdownRenders( """![Text[[[[[[[]]]]]]][]](http://link.com/image.png) more text""", """<p><img alt="Text[[[[[[[]]]]]]][]" src="http://link.com/image.png" /> more text</p>""" ) def test_nested_round_brackets(self): self.assertMarkdownRenders( """![Text](http://link.com/(((((((()))))))()).png) more text""", """<p><img alt="Text" src="http://link.com/(((((((()))))))()).png" /> more text</p>""" ) def test_uneven_brackets_with_titles1(self): self.assertMarkdownRenders( """![Text](http://link.com/(.png"title") more text""", """<p><img alt="Text" src="http://link.com/(.png" title="title" /> more text</p>""" ) def test_uneven_brackets_with_titles2(self): self.assertMarkdownRenders( """![Text](http://link.com/('.png"title") more text""", """<p><img alt="Text" src="http://link.com/('.png" title="title" /> more text</p>""" ) def test_uneven_brackets_with_titles3(self): self.assertMarkdownRenders( """![Text](http://link.com/(.png"title)") more text""", """<p><img alt="Text" src="http://link.com/(.png" title="title)" /> more text</p>""" ) def test_uneven_brackets_with_titles4(self): self.assertMarkdownRenders( """![Text](http://link.com/(.png "title") more text""", """<p><img alt="Text" src="http://link.com/(.png" title="title" /> more text</p>""" ) def test_uneven_brackets_with_titles5(self): self.assertMarkdownRenders( """![Text](http://link.com/(.png "title)") more text""", """<p><img alt="Text" src="http://link.com/(.png" title="title)" /> more text</p>""" ) def test_mixed_title_quotes1(self): self.assertMarkdownRenders( """![Text](http://link.com/'.png"title") more text""", """<p><img alt="Text" src="http://link.com/'.png" title="title" /> more text</p>""" ) def test_mixed_title_quotes2(self): self.assertMarkdownRenders( """![Text](http://link.com/".png'title') more text""", """<p><img alt="Text" src="http://link.com/&quot;.png" title="title" /> 
more text</p>""" ) def test_mixed_title_quotes3(self): self.assertMarkdownRenders( """![Text](http://link.com/with spaces.png'"and quotes" 'and title') more text""", """<p><img alt="Text" src="http://link.com/with spaces.png" title="&quot;and quotes&quot; 'and title" />""" """ more text</p>""" ) def test_mixed_title_quotes4(self): self.assertMarkdownRenders( """![Text](http://link.com/with spaces'.png"and quotes" 'and title") more text""", """<p><img alt="Text" src="http://link.com/with spaces'.png" title="and quotes&quot; 'and title" />""" """ more text</p>""" ) def test_mixed_title_quotes5(self): self.assertMarkdownRenders( """![Text](http://link.com/with spaces .png'"and quotes" 'and title') more text""", """<p><img alt="Text" src="http://link.com/with spaces .png" title="&quot;and quotes&quot;""" """ 'and title" /> more text</p>""" ) def test_mixed_title_quotes6(self): self.assertMarkdownRenders( """![Text](http://link.com/with spaces "and quotes".png 'and title') more text""", """<p><img alt="Text" src="http://link.com/with spaces &quot;and quotes&quot;.png" title="and title" />""" """ more text</p>""" ) def test_single_quote(self): self.assertMarkdownRenders( """![test](link"notitle.png)""", """<p><img alt="test" src="link&quot;notitle.png" /></p>""" ) def test_angle_with_mixed_title_quotes(self): self.assertMarkdownRenders( """![Text](<http://link.com/with spaces '"and quotes".png> 'and title') more text""", """<p><img alt="Text" src="http://link.com/with spaces '&quot;and quotes&quot;.png" title="and title" />""" """ more text</p>""" ) def test_misc(self): self.assertMarkdownRenders( """![Poster](http://humane_man.jpg "The most humane man.")""", """<p><img alt="Poster" src="http://humane_man.jpg" title="The most humane man." /></p>""" ) def test_misc_ref(self): self.assertMarkdownRenders( self.dedent( """ ![Poster][] [Poster]:http://humane_man.jpg "The most humane man." 
""" ), self.dedent( """ <p><img alt="Poster" src="http://humane_man.jpg" title="The most humane man." /></p> """ ) ) def test_misc_blank(self): self.assertMarkdownRenders( """![Blank]()""", """<p><img alt="Blank" src="" /></p>""" ) def test_misc_img_title(self): self.assertMarkdownRenders( """![Image](http://humane man.jpg "The most humane man.")""", """<p><img alt="Image" src="http://humane man.jpg" title="The most humane man." /></p>""" ) def test_misc_img(self): self.assertMarkdownRenders( """![Image](http://humane man.jpg)""", """<p><img alt="Image" src="http://humane man.jpg" /></p>""" )
40.057143
119
0.526034
647
5,608
4.457496
0.09119
0.07767
0.106796
0.116505
0.843967
0.837379
0.834951
0.834951
0.750347
0.722607
0
0.00264
0.256954
5,608
139
120
40.345324
0.689465
0
0
0.258824
0
0
0
0
0
0
0
0
0.235294
1
0.235294
false
0
0.011765
0
0.258824
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
9c2352932d298fc00d115f31134fdaff41eb9e22
101
py
Python
Python/Tests/TestData/Grammar/ClassDef3x.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/Grammar/ClassDef3x.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/Grammar/ClassDef3x.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
class C(metaclass=1): pass class C(object, metaclass=1): pass class C(list, object, fob=1): pass
25.25
35
0.693069
19
101
3.736842
0.473684
0.253521
0.394366
0.535211
0.56338
0
0
0
0
0
0
0.034884
0.148515
101
3
36
33.666667
0.77907
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
1
0
null
null
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
7
9c7a54f24104737efcd8b6d83b4fad34015e1189
2,233
py
Python
miniconda3-lnx/pkgs/cryptography-2.9.2-py37h1ba5d50_0/info/test/run_test.py
Thibaut-Kovaltchouk/MultiPyzo
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
[ "CC0-1.0" ]
null
null
null
miniconda3-lnx/pkgs/cryptography-2.9.2-py37h1ba5d50_0/info/test/run_test.py
Thibaut-Kovaltchouk/MultiPyzo
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
[ "CC0-1.0" ]
null
null
null
miniconda3-lnx/pkgs/cryptography-2.9.2-py37h1ba5d50_0/info/test/run_test.py
Thibaut-Kovaltchouk/MultiPyzo
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
[ "CC0-1.0" ]
null
null
null
# tests for cryptography-2.9.2-py37h1ba5d50_0 (this is a generated file); print('===== testing package: cryptography-2.9.2-py37h1ba5d50_0 ====='); print('running run_test.py'); # --- run_test.py (begin) --- import subprocess import time from cryptography.hazmat.backends.openssl import backend # the version that cryptography uses linked_version = backend.openssl_version_text() # the version present in the conda environment env_version = subprocess.check_output('openssl version', shell=True).decode('utf8').strip() print('Version used by cryptography:\n{linked_version}'.format(linked_version=linked_version)) print('Version in conda environment:\n{env_version}'.format(env_version=env_version)) # avoid race condition between print and (possible) AssertionError time.sleep(1) # linking problems have appeared on windows before (see issue #38), # and were only caught by lucky accident through the test suite. # This is intended to ensure it does not happen again. assert linked_version == env_version # --- run_test.py (end) --- print('===== cryptography-2.9.2-py37h1ba5d50_0 OK ====='); print("import: 'cryptography'") import cryptography print("import: 'cryptography.fernet'") import cryptography.fernet print("import: 'cryptography.hazmat'") import cryptography.hazmat print("import: 'cryptography.hazmat.backends'") import cryptography.hazmat.backends print("import: 'cryptography.hazmat.backends.openssl'") import cryptography.hazmat.backends.openssl print("import: 'cryptography.hazmat.bindings'") import cryptography.hazmat.bindings print("import: 'cryptography.hazmat.bindings.openssl'") import cryptography.hazmat.bindings.openssl print("import: 'cryptography.hazmat.primitives'") import cryptography.hazmat.primitives print("import: 'cryptography.hazmat.primitives.asymmetric'") import cryptography.hazmat.primitives.asymmetric print("import: 'cryptography.hazmat.primitives.ciphers'") import cryptography.hazmat.primitives.ciphers print("import: 'cryptography.hazmat.primitives.kdf'") 
import cryptography.hazmat.primitives.kdf print("import: 'cryptography.hazmat.primitives.twofactor'") import cryptography.hazmat.primitives.twofactor print("import: 'cryptography.x509'") import cryptography.x509
33.833333
94
0.788177
276
2,233
6.307971
0.344203
0.268811
0.275704
0.166571
0.431936
0.048248
0
0
0
0
0
0.019569
0.08464
2,233
65
95
34.353846
0.832192
0.202866
0
0
1
0
0.42275
0.280136
0
0
0
0
0.026316
1
0
false
0
0.763158
0
0.763158
0.473684
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
7
135585d421a2730371b73ae0165bf864988ddb30
415
py
Python
star4.py
anmol1455/python
8e3858bdebb21ec3c9e8147ceef17a82b4a36926
[ "bzip2-1.0.6" ]
null
null
null
star4.py
anmol1455/python
8e3858bdebb21ec3c9e8147ceef17a82b4a36926
[ "bzip2-1.0.6" ]
null
null
null
star4.py
anmol1455/python
8e3858bdebb21ec3c9e8147ceef17a82b4a36926
[ "bzip2-1.0.6" ]
null
null
null
for i in range(1,4,1): print() for j in range(1,3): print("*",end=" ") for k in range(1,3): print(" ",end=" ") for j in range(1,3): print("*",end=" ") for i in range(1,3): print() for j in range(1,7): print("*",end=" ") for i in range(1,8,1): print() for j in range(1,3): print(" ",end=" ") for k in range(1,3): print("*",end=" ")
21.842105
26
0.438554
71
415
2.56338
0.183099
0.346154
0.395604
0.296703
0.983516
0.912088
0.818681
0.659341
0.659341
0.527473
0
0.071942
0.33012
415
18
27
23.055556
0.582734
0
0
0.777778
0
0
0.028916
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
8
b93e078ae56275ddb2657451a96fcc0471f0fadb
57
py
Python
src/compiler.py
Mikerah/py-circom
5ea4c788342301a24b9245feb6421746876df630
[ "MIT" ]
2
2020-04-18T12:58:44.000Z
2020-04-18T13:05:32.000Z
src/compiler.py
Mikerah/py-circom
5ea4c788342301a24b9245feb6421746876df630
[ "MIT" ]
null
null
null
src/compiler.py
Mikerah/py-circom
5ea4c788342301a24b9245feb6421746876df630
[ "MIT" ]
null
null
null
from Lark import Lark def circom_compiler(): pass
8.142857
22
0.701754
8
57
4.875
0.875
0
0
0
0
0
0
0
0
0
0
0
0.245614
57
6
23
9.5
0.906977
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
1
0
1
0
0
7
b94dcc36e2d626e18ccbca2731d79a8cf25be612
85
py
Python
src/stitch_m/scripts/commandline.py
iandobbie/StitchM
73e6692562c22106bf48454876050d14a6d52213
[ "BSD-3-Clause" ]
null
null
null
src/stitch_m/scripts/commandline.py
iandobbie/StitchM
73e6692562c22106bf48454876050d14a6d52213
[ "BSD-3-Clause" ]
5
2021-02-01T20:49:13.000Z
2021-09-09T21:20:35.000Z
src/stitch_m/scripts/commandline.py
iandobbie/StitchM
73e6692562c22106bf48454876050d14a6d52213
[ "BSD-3-Clause" ]
1
2021-02-03T14:39:17.000Z
2021-02-03T14:39:17.000Z
from stitch_m import argparse_entrypoint def main(): argparse_entrypoint.main()
17
40
0.788235
11
85
5.818182
0.727273
0.5625
0
0
0
0
0
0
0
0
0
0
0.141176
85
5
41
17
0.876712
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
b95bc40a653e805035b33629732db365d61193cf
26,566
py
Python
txdav/caldav/datastore/test/test_sql_external.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
1
2017-02-18T19:22:19.000Z
2017-02-18T19:22:19.000Z
txdav/caldav/datastore/test/test_sql_external.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
null
null
null
txdav/caldav/datastore/test/test_sql_external.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
null
null
null
## # Copyright (c) 2013-2015 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from twisted.internet.defer import inlineCallbacks from twext.python.clsprop import classproperty from txdav.common.datastore.test.util import populateCalendarsFrom from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \ _BIND_STATUS_INVITED, _BIND_MODE_DIRECT, _BIND_STATUS_ACCEPTED from txdav.common.datastore.podding.test.util import MultiStoreConduitTest class BaseSharingTests(MultiStoreConduitTest): """ Test store-based calendar sharing. """ @inlineCallbacks def setUp(self): yield super(BaseSharingTests, self).setUp() yield self.populate() @inlineCallbacks def populate(self): yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0)) self.notifierFactory.reset() cal1 = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN BEGIN:VEVENT UID:uid1 DTSTART:20131122T140000 DURATION:PT1H CREATED:20060102T190000Z DTSTAMP:20051222T210507Z SUMMARY:event 1 END:VEVENT END:VCALENDAR """ @classproperty(cache=False) def requirements(cls): #@NoSelf return { "user01": { "calendar": { "cal1.ics": (cls.cal1, None,), }, "inbox": { }, }, "user02": { "calendar": { }, "inbox": { }, }, "user03": { "calendar": { }, "inbox": { }, }, } class CalendarSharing(BaseSharingTests): @inlineCallbacks def test_no_shares(self): """ Test that initially there are no shares. 
""" calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) @inlineCallbacks def test_invite_sharee(self): """ Test invite/uninvite creates/removes shares and notifications. """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) self.assertEqual(invites[0].uid, shareeView.shareUID()) self.assertEqual(invites[0].ownerUID, "user01") self.assertEqual(invites[0].shareeUID, "puser02") self.assertEqual(invites[0].mode, _BIND_MODE_READ) self.assertEqual(invites[0].status, _BIND_STATUS_INVITED) self.assertEqual(invites[0].summary, "summary") inviteUID = shareeView.shareUID() sharedName = shareeView.name() self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser02", name=sharedName) self.assertTrue(shared is None) notifyHome = yield self.theTransactionUnderTest(1).notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID, ]) yield self.commitTransaction(1) # Uninvite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) yield calendar.uninviteUIDFromShare("puser02") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) notifyHome = yield 
self.theTransactionUnderTest(1).notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, []) yield self.commitTransaction(1) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) yield calendar.setShared(False) self.assertFalse(calendar.isShared()) @inlineCallbacks def test_accept_share(self): """ Test that invite+accept creates shares and notifications. """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) inviteUID = shareeView.shareUID() sharedName = shareeView.name() self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser02", name=sharedName) self.assertTrue(shared is None) notifyHome = yield self.theTransactionUnderTest(1).notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 1) yield self.commitTransaction(1) # Accept txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.acceptShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is not None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", 
name="calendar") self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) # Re-accept txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.acceptShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is not None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) @inlineCallbacks def test_decline_share(self): """ Test that invite+decline does not create shares but does create notifications. """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) inviteUID = shareeView.shareUID() sharedName = shareeView.name() self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is None) notifyHome = yield txn2.notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 1) yield self.commitTransaction(1) # Decline txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.declineShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is 
None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) # Redecline txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.declineShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) @inlineCallbacks def test_accept_decline_share(self): """ Test that invite+accept/decline creates/removes shares and notifications. Decline via the home. 
""" # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) inviteUID = shareeView.shareUID() sharedName = shareeView.name() self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is None) notifyHome = yield txn2.notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 1) yield self.commitTransaction(1) # Accept txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.acceptShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is not None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) yield self.commitTransaction(0) # Decline txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.declineShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() 
self.assertEqual(notifications, [inviteUID + "-reply", ]) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertTrue(calendar.isShared()) @inlineCallbacks def test_accept_remove_share(self): """ Test that invite+accept/decline creates/removes shares and notifications. Decline via the shared collection (removal). """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) inviteUID = shareeView.shareUID() sharedName = shareeView.name() yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is None) notifyHome = yield txn2.notificationsWithUID("puser02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 1) yield self.commitTransaction(1) # Accept txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") yield shareeHome.acceptShare(inviteUID) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertTrue(shared is not None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) yield self.commitTransaction(0) # Delete txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) yield shared.deleteShare() yield self.commitTransaction(1) txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", 
name=sharedName) self.assertTrue(shared is None) yield self.commitTransaction(1) notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(notifications, [inviteUID + "-reply", ]) @inlineCallbacks def test_accept_remove_accept(self): yield self.createShare() yield self.removeShare() shared_name = yield self.createShare() txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name) self.assertTrue(otherCal is not None) yield self.commitTransaction(1) @inlineCallbacks def test_accept_remove_accept_newcalendar(self): """ Test that deleting and re-creating a share with the same sharer name works. """ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True) yield home.createCalendarWithName("shared") yield self.commitTransaction(0) shared_name = yield self.createShare(name="shared") txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name) self.assertTrue(otherCal is not None) yield self.commitTransaction(1) yield self.removeShare(name="shared") home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True) yield home.removeCalendarWithName("shared") yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name) self.assertTrue(otherCal is None) yield self.commitTransaction(1) home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True) yield home.createCalendarWithName("shared") yield self.commitTransaction(0) shared_name = yield self.createShare(name="shared") txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name) self.assertTrue(otherCal is not None) yield 
self.commitTransaction(1) @inlineCallbacks def test_inviteProperties(self): calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") yield calendar.setUsedForFreeBusy(True) yield self.commitTransaction(0) shared_name = yield self.createShare() txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name) self.assertFalse(shared.isUsedForFreeBusy()) @inlineCallbacks def test_direct_sharee(self): """ Test invite/uninvite creates/removes shares and notifications. """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) self.assertFalse(calendar.isShared()) shareeView = yield calendar.directShareWithUser("puser02") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 1) self.assertEqual(invites[0].uid, shareeView.shareUID()) self.assertEqual(invites[0].ownerUID, "user01") self.assertEqual(invites[0].shareeUID, "puser02") self.assertEqual(invites[0].mode, _BIND_MODE_DIRECT) self.assertEqual(invites[0].status, _BIND_STATUS_ACCEPTED) sharedName = shareeView.name() yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="user02", name=sharedName) self.assertTrue(shared is not None) notifyHome = yield txn2.notificationsWithUID("user02") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 0) yield self.commitTransaction(1) # Remove txn2 = self.theTransactionUnderTest(1) shared = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) yield shared.deleteShare() yield self.commitTransaction(1) calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) 
notifyHome = yield self.theTransactionUnderTest(0).notificationsWithUID("user01") notifications = yield notifyHome.listNotificationObjects() self.assertEqual(len(notifications), 0) test_direct_sharee.skip = True @inlineCallbacks def test_sharedNotifierID(self): shared_name = yield self.createShare() home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01") self.assertEquals(home.notifierID(), ("CalDAV", "user01",)) calendar = yield home.calendarWithName("calendar") self.assertEquals(calendar.notifierID(), ("CalDAV", "user01/calendar",)) yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) home = yield self.homeUnderTest(txn=txn2, name="puser02") self.assertEquals(home.notifierID(), ("CalDAV", "puser02",)) calendar = yield home.calendarWithName(shared_name) self.assertEquals(calendar.notifierID(), ("CalDAV", "user01/calendar",)) @inlineCallbacks def test_sharedWithTwo(self): shared_name1 = yield self.createShare(shareeGUID="puser02") shared_name2 = yield self.createShare(shareeGUID="puser03") txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=shared_name1) self.assertTrue(otherCal is not None) yield self.commitTransaction(1) txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser03", name=shared_name2) self.assertTrue(otherCal is not None) yield self.commitTransaction(1) class SharingRevisions(BaseSharingTests): """ Test store-based sharing and interaction with revision table. """ @inlineCallbacks def test_shareWithRevision(self): """ Verify that bindRevision on calendars and shared calendars has the correct value. 
""" sharedName = yield self.createShare() normalCal = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertEqual(normalCal._bindRevision, 0) txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertNotEqual(otherCal._bindRevision, 0) @inlineCallbacks def test_updateShareRevision(self): """ Verify that bindRevision on calendars and shared calendars has the correct value. """ # Invite calendar = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") invites = yield calendar.sharingInvites() self.assertEqual(len(invites), 0) shareeView = yield calendar.inviteUIDToShare("puser02", _BIND_MODE_READ, "summary") newCalName = shareeView.shareUID() yield self.commitTransaction(0) normalCal = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertEqual(normalCal._bindRevision, 0) yield self.commitTransaction(0) txn2 = self.theTransactionUnderTest(1) otherHome = yield self.homeUnderTest(txn=txn2, name="puser02") otherCal = yield otherHome.anyObjectWithShareUID(newCalName) self.assertEqual(otherCal._bindRevision, 0) yield self.commitTransaction(1) txn2 = self.theTransactionUnderTest(1) shareeHome = yield self.homeUnderTest(txn=txn2, name="puser02") shareeView = yield shareeHome.acceptShare(newCalName) sharedName = shareeView.name() yield self.commitTransaction(1) normalCal = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertEqual(normalCal._bindRevision, 0) txn2 = self.theTransactionUnderTest(1) otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertNotEqual(otherCal._bindRevision, 0) @inlineCallbacks def test_sharedRevisions(self): """ Verify that resourceNamesSinceRevision returns all resources after initial bind and sync. 
""" sharedName = yield self.createShare() normalCal = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar") self.assertEqual(normalCal._bindRevision, 0) txn2 = self.theTransactionUnderTest(1) otherHome = yield self.homeUnderTest(txn=txn2, name="puser02") otherCal = yield self.calendarUnderTest(txn=txn2, home="puser02", name=sharedName) self.assertNotEqual(otherCal._bindRevision, 0) sync_token = yield otherCal.syncToken() revision = otherCal.revisionFromToken(sync_token) changed, deleted, invalid = yield otherCal.resourceNamesSinceRevision(0) self.assertNotEqual(len(changed), 0) self.assertEqual(len(deleted), 0) self.assertEqual(len(invalid), 0) changed, deleted, invalid = yield otherCal.resourceNamesSinceRevision(revision) self.assertEqual(len(changed), 0) self.assertEqual(len(deleted), 0) self.assertEqual(len(invalid), 0) sync_token = yield otherHome.syncToken() revision = otherHome.revisionFromToken(sync_token) for depth in ("1", "infinity",): changed, deleted, invalid = yield otherHome.resourceNamesSinceRevision(revision - 1, depth) self.assertEqual(len(changed), 0 if depth == "infinity" else 1) self.assertEqual(len(deleted), 0) self.assertEqual(len(invalid), 1 if depth == "infinity" else 0) changed, deleted, invalid = yield otherHome.resourceNamesSinceRevision(revision, depth) self.assertEqual(len(changed), 0) self.assertEqual(len(deleted), 0) self.assertEqual(len(invalid), 1 if depth == "infinity" else 0) yield self.commitTransaction(1) yield self.removeShare() txn2 = self.theTransactionUnderTest(1) otherHome = yield self.homeUnderTest(txn=txn2, name="puser02") for depth in ("1", "infinity",): changed, deleted, invalid = yield otherHome.resourceNamesSinceRevision(revision, depth) self.assertEqual(len(changed), 0) self.assertEqual(len(deleted), 1) self.assertEqual(len(invalid), 0)
38.896047
117
0.682414
2,578
26,566
6.993018
0.103569
0.066896
0.069226
0.077213
0.834813
0.821611
0.801309
0.780896
0.767417
0.756213
0
0.024622
0.214184
26,566
682
118
38.953079
0.838954
0.062335
0
0.763514
0
0
0.055339
0.004395
0
0
0
0
0.25
1
0.040541
false
0
0.011261
0.002252
0.063063
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b97ac8897802026797a1a65c6bbac982555d92e9
11,393
py
Python
api_1.3/containerd/services/images/v1/images_pb2_grpc.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
24
2019-12-16T12:38:51.000Z
2022-02-16T18:44:20.000Z
api_1.5/containerd/services/images/v1/images_pb2_grpc.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
9
2020-03-03T07:42:40.000Z
2021-09-01T10:11:18.000Z
api_1.4/containerd/services/images/v1/images_pb2_grpc.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
10
2019-12-16T11:20:23.000Z
2022-01-24T01:53:13.000Z
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from containerd.services.images.v1 import images_pb2 as containerd_dot_services_dot_images_dot_v1_dot_images__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class ImagesStub(object): """Images is a service that allows one to register images with containerd. In containerd, an image is merely the mapping of a name to a content root, described by a descriptor. The behavior and state of image is purely dictated by the type of the descriptor. From the perspective of this service, these references are mostly shallow, in that the existence of the required content won't be validated until required by consuming services. As such, this can really be considered a "metadata service". """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Get = channel.unary_unary( '/containerd.services.images.v1.Images/Get', request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.SerializeToString, response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.FromString, ) self.List = channel.unary_unary( '/containerd.services.images.v1.Images/List', request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.SerializeToString, response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.FromString, ) self.Create = channel.unary_unary( '/containerd.services.images.v1.Images/Create', request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.SerializeToString, response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.FromString, ) self.Update = channel.unary_unary( '/containerd.services.images.v1.Images/Update', 
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.SerializeToString, response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.FromString, ) self.Delete = channel.unary_unary( '/containerd.services.images.v1.Images/Delete', request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class ImagesServicer(object): """Images is a service that allows one to register images with containerd. In containerd, an image is merely the mapping of a name to a content root, described by a descriptor. The behavior and state of image is purely dictated by the type of the descriptor. From the perspective of this service, these references are mostly shallow, in that the existence of the required content won't be validated until required by consuming services. As such, this can really be considered a "metadata service". """ def Get(self, request, context): """Get returns an image by name. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def List(self, request, context): """List returns a list of all images known to containerd. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Create(self, request, context): """Create an image record in the metadata store. The name of the image must be unique. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Update(self, request, context): """Update assigns the name to a given target image based on the provided image. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Delete(self, request, context): """Delete deletes the image by name. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ImagesServicer_to_server(servicer, server): rpc_method_handlers = { 'Get': grpc.unary_unary_rpc_method_handler( servicer.Get, request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.FromString, response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.SerializeToString, ), 'List': grpc.unary_unary_rpc_method_handler( servicer.List, request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.FromString, response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.SerializeToString, ), 'Create': grpc.unary_unary_rpc_method_handler( servicer.Create, request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.FromString, response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.SerializeToString, ), 'Update': grpc.unary_unary_rpc_method_handler( servicer.Update, request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.FromString, response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.SerializeToString, ), 'Delete': grpc.unary_unary_rpc_method_handler( servicer.Delete, request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'containerd.services.images.v1.Images', rpc_method_handlers) 
server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Images(object): """Images is a service that allows one to register images with containerd. In containerd, an image is merely the mapping of a name to a content root, described by a descriptor. The behavior and state of image is purely dictated by the type of the descriptor. From the perspective of this service, these references are mostly shallow, in that the existence of the required content won't be validated until required by consuming services. As such, this can really be considered a "metadata service". """ @staticmethod def Get(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Get', containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.SerializeToString, containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def List(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/List', containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.SerializeToString, containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Create(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return 
grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Create', containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.SerializeToString, containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Update(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Update', containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.SerializeToString, containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Delete(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Delete', containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
47.273859
136
0.698762
1,260
11,393
5.980952
0.119841
0.066879
0.078025
0.089172
0.857086
0.85284
0.842489
0.812898
0.770303
0.770303
0
0.008531
0.238655
11,393
240
137
47.470833
0.860272
0.178355
0
0.490798
1
0
0.078789
0.050923
0
0
0
0
0
1
0.07362
false
0
0.018405
0.030675
0.141104
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b987499f9b20405c786ea15f917a86bdeebc41e0
1,548
py
Python
code/sm_matrices.py
naoufal51/orion
0beb3020a3ca0fa7a4113db23ef4767ac3d63624
[ "MIT" ]
13
2017-04-15T16:46:34.000Z
2019-05-07T06:33:23.000Z
code/sm_matrices.py
naoufal51/orion
0beb3020a3ca0fa7a4113db23ef4767ac3d63624
[ "MIT" ]
3
2017-07-26T09:01:26.000Z
2020-10-01T16:25:08.000Z
code/sm_matrices.py
naoufal51/orion
0beb3020a3ca0fa7a4113db23ef4767ac3d63624
[ "MIT" ]
8
2017-08-08T21:15:35.000Z
2021-08-07T09:12:54.000Z
"""sm_matrices.py sm matrices used by the intel 5300 agn Naoufal Mahfoudi (c) 2016 mohamed-naoufal.mahfoudi@inria.fr """ import numpy as np # sm_1 = 1 # # sm_2_20 = np.matrix('1 1; 1 -1') / np.sqrt(2) # # sm_2_40 = np.matrix('1 1; 1j 1') / np.sqrt(2) # # sm_3_20 = np.matrix('-2*np.pi/16 -2*np.pi/(80/33) 2*np.pi/(80/3);' # '2*np.pi/(80/23) 2*np.pi/(48/13) 2*np.pi/(240/13);' # '-2*np.pi/(80/13) 2*np.pi/(240/37) 2*np.pi/(48/13)') # sm_3_20 = np.exp(1j * sm_3_20) / np.sqrt(3) # # sm_3_40 = np.matrix('-2*np.pi/16 -2*np.pi/(80/13) 2*np.pi/(80/23);' # '2*np.pi/(80/37) 2*np.pi/(48/11) 2*np.pi/(240/107);' # '-2*np.pi/(80/7) 2*np.pi/(240/83) 2*np.pi/(48/11)') # sm_3_20 = np.exp(1j * sm_3_40) / np.sqrt(3) sm_1 = 1 sm_2_20 = np.matrix([[1, 1], [1, -1]]) / np.sqrt(2) sm_2_40 = np.matrix([[1, 1], [1j, 1]]) / np.sqrt(2) sm_3_20 = np.matrix([[-2 * np.pi / 16, -2 * np.pi / (80 / 33), 2 * np.pi / (80 / 3)], [2 * np.pi / (80 / 23), 2 * np.pi / (48 / 13), 2 * np.pi / (240 / 13)], [-2 * np.pi / (80 / 13), 2 * np.pi / (240 / 37), 2 * np.pi / (48 / 13)]]) sm_3_20 = np.exp(1j * sm_3_20) / np.sqrt(3) sm_3_40 = np.matrix([[-2 * np.pi / 16, -2 * np.pi / (80 / 13), 2 * np.pi / (80 / 23)], [-2 * np.pi / (80 / 37), -2 * np.pi / (48 / 11), -2 * np.pi / (240 / 107)], [2 * np.pi / (80 / 7), -2 * np.pi / (240 / 83), -2 * np.pi / (48 / 11)]]) sm_3_40 = np.exp(1j * sm_3_40) / np.sqrt(3)
36.857143
96
0.463824
322
1,548
2.121118
0.130435
0.158126
0.263543
0.163982
0.84041
0.84041
0.84041
0.84041
0.837482
0.781845
0
0.236234
0.27261
1,548
41
97
37.756098
0.370337
0.471576
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
b99aeec863c277d8b0ead15aec523a9ccb9ea599
17,685
py
Python
tests/api/crud/test_trigger_tags.py
bossjones/ultron8
45db73d32542a844570d44bc83defa935e15803f
[ "Apache-2.0", "MIT" ]
null
null
null
tests/api/crud/test_trigger_tags.py
bossjones/ultron8
45db73d32542a844570d44bc83defa935e15803f
[ "Apache-2.0", "MIT" ]
43
2019-06-01T23:08:32.000Z
2022-02-07T22:24:53.000Z
tests/api/crud/test_trigger_tags.py
bossjones/ultron8
45db73d32542a844570d44bc83defa935e15803f
[ "Apache-2.0", "MIT" ]
null
null
null
from fastapi.encoders import jsonable_encoder from freezegun import freeze_time import pytest from sqlalchemy.orm import Session from ultron8.api import crud from ultron8.api.db_models.trigger import TriggerDB from ultron8.api.models.packs import PacksCreate from ultron8.api.models.trigger import ( TriggerBaseDB, TriggerBaseInDB, TriggerCreate, TriggerInstanceBaseDB, TriggerInstanceBaseInDB, TriggerInstanceCreate, TriggerInstanceUpdate, TriggerTagsBase, TriggerTagsBaseInDB, TriggerTagsCreate, TriggerTagsUpdate, TriggerTypeBase, TriggerTypeBaseInDB, TriggerTypeCreate, TriggerTypeUpdate, TriggerUpdate, ) from tests.utils.utils import random_lower_string TRIGGER_0 = { "name": "ultron8.test.trigger0", "pack": "dummy_pack_1", "description": "test trigger", "type": "dummy_pack_1.ultron8.test.triggertype0", "parameters": {}, } TRIGGER_1 = { "name": "ultron8.test.trigger1", "pack": "dummy_pack_1", "description": "test trigger", "type": "dummy_pack_1.ultron8.test.triggertype1", "parameters": {}, } TRIGGER_2 = { "name": "ultron8.test.trigger2", "pack": "dummy_pack_1", "description": "test trigger", "type": "dummy_pack_1.ultron8.test.triggertype2", "parameters": {"param1": {"foo": "bar"}}, } TRIGGERTYPE_0 = { "name": "ultron8.test.triggertype0", "pack": "dummy_pack_1", "description": "test trigger", "payload_schema": {"tp1": None, "tp2": None, "tp3": None}, "parameters_schema": {}, } TRIGGERTYPE_1 = { "name": "ultron8.test.triggertype1", "pack": "dummy_pack_1", "description": "test trigger", "payload_schema": {"tp1": None, "tp2": None, "tp3": None}, } TRIGGERTYPE_2 = { "name": "ultron8.test.triggertype2", "pack": "dummy_pack_1", "description": "test trigger", "payload_schema": {"tp1": None, "tp2": None, "tp3": None}, "parameters_schema": {"param1": {"type": "object"}}, } @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_create_trigger_tags(db: Session) -> None: packs_shared_name = random_lower_string() packs_name = 
packs_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = packs_shared_name trigger_name = TRIGGER_0["name"] trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) assert trigger.name == trigger_name assert trigger.packs_name == trigger_packs_name assert trigger.description == trigger_description assert trigger.type == trigger_type assert trigger.parameters == trigger_parameters @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_get_trigger_tags(db: Session) -> None: packs_shared_name = random_lower_string() packs_name = packs_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = packs_shared_name 
trigger_name = TRIGGER_0["name"] trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) trigger_2 = crud.trigger.get(db, trigger_id=trigger.id) assert jsonable_encoder(trigger) == jsonable_encoder(trigger_2) @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_get_by_ref_trigger_tags(db: Session) -> None: pack_shared_name = random_lower_string() packs_name = pack_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = pack_shared_name trigger_name = TRIGGER_0["name"] trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, 
packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) ref_lookup = "{}.{}".format(packs_name, trigger.name) trigger_2 = crud.trigger.get_by_ref(db, ref=ref_lookup) assert jsonable_encoder(trigger) == jsonable_encoder(trigger_2) @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_get_by_name_trigger_tags(db: Session) -> None: pack_shared_name = random_lower_string() packs_name = pack_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = pack_shared_name trigger_name = random_lower_string() trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) trigger_2 = crud.trigger.get_by_name(db, name=trigger_name) assert jsonable_encoder(trigger) == jsonable_encoder(trigger_2) @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly 
@pytest.mark.unittest def test_get_multi_trigger_tags(db: Session) -> None: pack_shared_name = random_lower_string() packs_name = pack_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = pack_shared_name trigger_name0 = random_lower_string() trigger_packs_name0 = packs_name trigger_description0 = TRIGGER_0["description"] trigger_type0 = TRIGGER_0["type"] trigger_parameters0 = TRIGGER_0["parameters"] trigger_name1 = random_lower_string() trigger_packs_name1 = packs_name trigger_description1 = TRIGGER_1["description"] trigger_type1 = TRIGGER_1["type"] trigger_parameters1 = TRIGGER_1["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in0 = TriggerCreate( name=trigger_name0, packs_name=trigger_packs_name0, description=trigger_description0, type=trigger_type0, parameters=trigger_parameters0, ) trigger_in1 = TriggerCreate( name=trigger_name1, packs_name=trigger_packs_name1, description=trigger_description1, type=trigger_type1, parameters=trigger_parameters1, ) trigger0 = crud.trigger.create(db, trigger_in=trigger_in0, packs_id=packs.id) trigger1 = crud.trigger.create(db, trigger_in=trigger_in1, packs_id=packs.id) trigger_2 = crud.trigger.get_multi(db) for t in trigger_2: assert type(t) == TriggerDB @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_get_multi_by_packs_id_trigger_tags(db: Session) -> 
None: pack_shared_name = random_lower_string() packs_name = pack_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = pack_shared_name trigger_name0 = random_lower_string() trigger_packs_name0 = packs_name trigger_description0 = TRIGGER_0["description"] trigger_type0 = TRIGGER_0["type"] trigger_parameters0 = TRIGGER_0["parameters"] trigger_name1 = random_lower_string() trigger_packs_name1 = packs_name trigger_description1 = TRIGGER_1["description"] trigger_type1 = TRIGGER_1["type"] trigger_parameters1 = TRIGGER_1["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in0 = TriggerCreate( name=trigger_name0, packs_name=trigger_packs_name0, description=trigger_description0, type=trigger_type0, parameters=trigger_parameters0, ) trigger_in1 = TriggerCreate( name=trigger_name1, packs_name=trigger_packs_name1, description=trigger_description1, type=trigger_type1, parameters=trigger_parameters1, ) trigger0 = crud.trigger.create(db, trigger_in=trigger_in0, packs_id=packs.id) trigger1 = crud.trigger.create(db, trigger_in=trigger_in1, packs_id=packs.id) trigger_2 = crud.trigger.get_multi_by_packs_id(db, packs_id=packs.id, limit=2) for t in trigger_2: assert type(t) == TriggerDB assert t.packs_id == packs.id @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_update_trigger_tags(db: Session) -> None: 
packs_shared_name = random_lower_string() packs_name = packs_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = packs_shared_name trigger_name = TRIGGER_0["name"] trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) description2 = random_lower_string() trigger_update = TriggerUpdate(description=description2) trigger2 = crud.trigger.update( db_session=db, trigger=trigger, trigger_in=trigger_update ) assert trigger.name == trigger2.name assert trigger.packs_name == trigger2.packs_name assert trigger.description == description2 assert trigger.type == trigger2.type assert trigger.parameters == trigger2.parameters @freeze_time("2019-07-25 01:11:00.740428") @pytest.mark.triggeronly @pytest.mark.unittest def test_delete_trigger_tags(db: Session) -> None: packs_shared_name = random_lower_string() packs_name = packs_shared_name packs_description = random_lower_string() packs_keywords = random_lower_string() packs_version = random_lower_string() packs_python_versions = 
random_lower_string() packs_author = random_lower_string() packs_email = "info@theblacktonystark.com" packs_contributors = random_lower_string() packs_files = random_lower_string() packs_path = random_lower_string() packs_ref = packs_shared_name trigger_name = TRIGGER_0["name"] trigger_packs_name = packs_name trigger_description = TRIGGER_0["description"] trigger_type = TRIGGER_0["type"] trigger_parameters = TRIGGER_0["parameters"] packs_in = PacksCreate( name=packs_name, description=packs_description, keywords=packs_keywords, version=packs_version, python_versions=packs_python_versions, author=packs_author, email=packs_email, contributors=packs_contributors, files=packs_files, path=packs_path, ref=packs_ref, ) packs = crud.packs.create(db, packs_in=packs_in) trigger_in = TriggerCreate( name=trigger_name, packs_name=trigger_packs_name, description=trigger_description, type=trigger_type, parameters=trigger_parameters, ) trigger = crud.trigger.create(db, trigger_in=trigger_in, packs_id=packs.id) trigger2 = crud.trigger.remove(db_session=db, trigger_id=trigger.id) trigger3 = crud.trigger.get(db_session=db, trigger_id=trigger.id) assert trigger3 is None assert trigger2.id == trigger.id assert trigger2.name == trigger.name assert trigger2.packs_name == trigger.packs_name assert trigger2.description == trigger2.description assert trigger2.type == trigger.type assert trigger2.parameters == trigger.parameters
32.096189
82
0.715069
2,092
17,685
5.677342
0.059273
0.073167
0.113076
0.133367
0.86436
0.853498
0.8439
0.839017
0.839017
0.833291
0
0.022732
0.194063
17,685
550
83
32.154545
0.810566
0
0
0.772632
0
0
0.077693
0.026011
0
0
0
0
0.048421
1
0.016842
false
0
0.018947
0
0.035789
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b9f227157f9ae198fc5fb6787602709419f32850
1,351
py
Python
data/datasets/7b/voices.inc.py
CherokeeLanguage/Cherokee-TTS
de034392ba8934fd8468617ebd7fcd8dace91162
[ "MIT" ]
5
2020-10-12T16:07:53.000Z
2022-01-12T17:52:43.000Z
data/datasets/5f/voices.inc.py
CherokeeLanguage/Cherokee-TTS
de034392ba8934fd8468617ebd7fcd8dace91162
[ "MIT" ]
null
null
null
data/datasets/5f/voices.inc.py
CherokeeLanguage/Cherokee-TTS
de034392ba8934fd8468617ebd7fcd8dace91162
[ "MIT" ]
1
2020-10-30T06:53:18.000Z
2020-10-30T06:53:18.000Z
voices: list[str] = ["03-chr", "10-chr", "02-ru", "01-m-ssw-chr", "04-fr", "05-ru", "360-en-m", "329-en-f", "361-en-f", "308-en-f", "311-en-m", "334-en-m", "362-en-f", "330-en-f", "339-en-f", "294-en-f", "310-en-f", "318-en-f", "333-en-f", "305-en-f", "297-en-f", "301-en-f", "27-de", "341-en-f", "299-en-f", "300-en-f", "345-en-m", "11-fr", "13-de", "01-nl", "04-ru", "306-en-f", "08-nl", "cno-f-chr_2", "21-fr", "52-de", "24-de", "03-nl", "36-de", "09-fr", "14-de", "19-fr", "21-de", "20-fr", "01-de", "01-m-walc1", "22-de", "16-fr", "cno-m-chr_2", "04-m-walc1", "37-de", "49-de", "07-fr", "06-ru", "07-zh", "03-zh", "06-de", "17-de", "02-m-walc1", "51-de", "06-f-walc1", "06-fr", "02-m-df-chr", "01-m-wwacc", "45-de", "12-de", "02-de", "41-de", "25-fr", "04-f-walc1", "03-ru", "06-nl", "07-de", "47-de", "04-de", "04-nl", "06-zh", "08-fr", "01-m-df-chr", "50-de", "40-de", "02-zh", "19-de", "48-de", "01-ru", "23-de", "03-f-walc1", "32-de", "cno-m-chr_1", "17-fr", "46-de", "09-nl", "cno-f-chr_5", "44-de", "14-fr", "18-fr", "10-fr", "43-de", "26-de", "01-fr", "09-de", "10-nl", "31-de", "02-fr", "12-nl", "02-nl", "10-de", "29-de", "13-fr", "05-fr", "05-zh", "05-de", "26-fr", "07-nl", "11-nl", "cno-f-chr_3", "25-de", "11-de", "33-de", "22-fr", "15-fr", "01-zh", "04-chr", "01-f-walc1", "cno-f-chr_1", "02-f-walc1", "05-f-walc1", "01-f-wwacc", ]
675.5
1,350
0.486306
309
1,351
2.106796
0.281553
0.078341
0.043011
0.041475
0
0
0
0
0
0
0
0.229696
0.097705
1,351
1
1,351
1,351
0.304348
0
0
0
0
0
0.603997
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
b9f4604b081f940cee670899b0f3a63167bc3e8c
92
py
Python
wildfirepy/net/__init__.py
ram-nad/wildfirepy
7c449357ddbafa9ef9797b58fabf44d8a1f54137
[ "MIT" ]
null
null
null
wildfirepy/net/__init__.py
ram-nad/wildfirepy
7c449357ddbafa9ef9797b58fabf44d8a1f54137
[ "MIT" ]
4
2020-05-07T07:04:55.000Z
2020-05-08T12:47:41.000Z
wildfirepy/net/__init__.py
ram-nad/wildfirepy
7c449357ddbafa9ef9797b58fabf44d8a1f54137
[ "MIT" ]
null
null
null
from wildfirepy.net import util from wildfirepy.net import usgs __all__ = ['usgs', 'util']
18.4
31
0.75
13
92
5
0.538462
0.430769
0.523077
0.707692
0
0
0
0
0
0
0
0
0.141304
92
4
32
23
0.822785
0
0
0
0
0
0.086957
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
e01793be5f44283b451c12c8958c07362787f353
4,174
py
Python
tests/test_io.py
rahulbhadani/hcipy
b52726cb9502b5225ddff9d7b1ff417f2350cda8
[ "MIT" ]
null
null
null
tests/test_io.py
rahulbhadani/hcipy
b52726cb9502b5225ddff9d7b1ff417f2350cda8
[ "MIT" ]
null
null
null
tests/test_io.py
rahulbhadani/hcipy
b52726cb9502b5225ddff9d7b1ff417f2350cda8
[ "MIT" ]
null
null
null
import numpy as np import os from hcipy import * """ def test_write_mode_basis(): # grid for the test mode basis pupil_grid = make_pupil_grid(128) # generating a test mode basis test_mode_basis = make_zernike_basis(num_modes=20, D=1, grid=pupil_grid, starting_mode=1, ansi=False, radial_cutoff=True) file_name = 'write_mode_basis_test.fits' # saving the mode basis write_mode_basis(test_mode_basis, file_name) # loading it test_mode_basis_array_read = read_fits(file_name) # comparing the arrays test_mode_basis_array = np.array([test_mode_basis]) test_mode_basis_array = np.reshape(test_mode_basis_array, [20,128,128]) assert np.isclose(test_mode_basis_array, test_mode_basis_array_read, rtol=1e-02, atol=1e-05).all() # Remove temporary file. os.remove(file_name) def test_read_mode_basis_1(): #------------------------------- # testing a square mode basis that we read without providing a grid #------------------------------- # grid for the test mode basis pupil_grid = make_pupil_grid(128) # testing a square mode basis defined in the pupil plane test_mode_basis = make_zernike_basis(num_modes=20, D=1, grid=pupil_grid, starting_mode=1, ansi=False, radial_cutoff=True) # writing the mode basis file_name = 'read_mode_basis_test_1.fits' write_mode_basis(test_mode_basis, file_name) # and loading it again test_mode_basis_read = read_mode_basis(file_name, grid=None) # checking if the modes are still the same for mode, mode_read in zip(test_mode_basis, test_mode_basis_read): assert np.isclose(mode, mode_read, rtol=1e-02, atol=1e-05).all() # checking if the grid is correct assert np.isclose(pupil_grid.x, test_mode_basis_read[0].grid.x, rtol=1e-02, atol=1e-05).all() assert np.isclose(pupil_grid.y, test_mode_basis_read[0].grid.y, rtol=1e-02, atol=1e-05).all() # Remove temporary file. 
os.remove(file_name) def test_read_mode_basis_2(): #------------------------------- # testing a square mode basis that we read with providing a grid #------------------------------- # grid for the test mode basis pupil_grid = make_pupil_grid(128, 3) # testing a square mode basis defined in the pupil plane test_mode_basis = make_zernike_basis(num_modes=20, D=3, grid=pupil_grid, starting_mode=1, ansi=False, radial_cutoff=True) # writing the mode basis file_name = 'read_mode_basis_test_2.fits' write_mode_basis(test_mode_basis, file_name) # and loading it again test_mode_basis_read = read_mode_basis(file_name, grid=pupil_grid) # checking if the modes are still the same for mode, mode_read in zip(test_mode_basis, test_mode_basis_read): assert np.isclose(mode, mode_read, rtol=1e-02, atol=1e-05).all() # checking if the grid is correct assert np.isclose(pupil_grid.x, test_mode_basis_read[0].grid.x, rtol=1e-02, atol=1e-05).all() assert np.isclose(pupil_grid.y, test_mode_basis_read[0].grid.y, rtol=1e-02, atol=1e-05).all() # Remove temporary file. 
os.remove(file_name) def test_read_mode_basis_3(): #------------------------------- # testing a non-square mode basis that we read with providing a grid #------------------------------- # grid for the test mode basis pupil_grid = make_uniform_grid([128,256], [128,256], center=0, has_center=False) # testing a square mode basis defined in the pupil plane test_mode_basis = [] for i in np.arange(20): test_mode_basis.append(Field(np.random.rand(128*256), pupil_grid)) test_mode_basis = ModeBasis(test_mode_basis) # writing the mode basis file_name = 'read_mode_basis_test_3.fits' write_mode_basis(test_mode_basis, file_name) # and loading it again test_mode_basis_read = read_mode_basis(file_name, grid=pupil_grid) # checking if the modes are still the same for mode, mode_read in zip(test_mode_basis, test_mode_basis_read): assert np.isclose(mode, mode_read, rtol=1e-02, atol=1e-05).all() # checking if the grid is correct assert np.isclose(pupil_grid.x, test_mode_basis_read[0].grid.x, rtol=1e-02, atol=1e-05).all() assert np.isclose(pupil_grid.y, test_mode_basis_read[0].grid.y, rtol=1e-02, atol=1e-05).all() # Remove temporary file. os.remove(file_name) """
35.675214
122
0.723527
704
4,174
4.015625
0.130682
0.200566
0.174744
0.072161
0.855677
0.831977
0.818182
0.818182
0.789883
0.789883
0
0.034157
0.137278
4,174
117
123
35.675214
0.750903
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
10
e0297dda2f8aa945c1bdc99cb0ae6de51836f3a5
77
py
Python
CodeUp/1534.py
chae-heechan/Algorithm_Study
183a77e2cfe352cd82fb5e988b493082529a73dd
[ "MIT" ]
null
null
null
CodeUp/1534.py
chae-heechan/Algorithm_Study
183a77e2cfe352cd82fb5e988b493082529a73dd
[ "MIT" ]
null
null
null
CodeUp/1534.py
chae-heechan/Algorithm_Study
183a77e2cfe352cd82fb5e988b493082529a73dd
[ "MIT" ]
null
null
null
# 함수로 실수(double) 3.1415926535897 리턴하기 def f(): print(3.1415926535897) f()
19.25
37
0.688312
12
77
4.416667
0.75
0.528302
0
0
0
0
0
0
0
0
0
0.430769
0.155844
77
4
38
19.25
0.384615
0.454545
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0
0
0.333333
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
7
e0529bb59750569b4010e063ae86f405220f24ca
90
py
Python
principal.py
7RogerIkeda/Travis_teste
034d625f32f3487c4c4923ea2d868d37bf0677d2
[ "Apache-2.0" ]
null
null
null
principal.py
7RogerIkeda/Travis_teste
034d625f32f3487c4c4923ea2d868d37bf0677d2
[ "Apache-2.0" ]
null
null
null
principal.py
7RogerIkeda/Travis_teste
034d625f32f3487c4c4923ea2d868d37bf0677d2
[ "Apache-2.0" ]
null
null
null
def somar(x,y): return x+ y def subtrair (x,y): return x- y def mult(): pass
11.25
19
0.555556
17
90
2.941176
0.470588
0.16
0.32
0.36
0.52
0.52
0
0
0
0
0
0
0.3
90
7
20
12.857143
0.793651
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.166667
0
0.333333
0.833333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
8
e07216ae0a66acc75116aaf06810abd64420d527
18,981
py
Python
pymove/tests/test_semantic.py
JoaoCarabetta/PyMove
0b712a9b65e0a5666db4bfecee3cd038ed155f7d
[ "MIT" ]
1
2022-01-25T19:57:23.000Z
2022-01-25T19:57:23.000Z
pymove/tests/test_semantic.py
safarzadeh-reza/PyMove
c04f365499cc201c14d4fcf86e40e8fce43e2906
[ "MIT" ]
null
null
null
pymove/tests/test_semantic.py
safarzadeh-reza/PyMove
c04f365499cc201c14d4fcf86e40e8fce43e2906
[ "MIT" ]
null
null
null
from numpy import nan from pandas import DataFrame, Timestamp from pandas.testing import assert_frame_equal from pymove import MoveDataFrame, semantic from pymove.utils.constants import ( BLOCK, DATETIME, LATITUDE, LONGITUDE, SEGMENT_STOP, TRAJ_ID, ) list_data = [ [39.984094, 116.319236, '2008-10-23 05:53:05', 1], [39.984198, 116.319322, '2008-10-23 05:53:06', 1], [39.984224, 116.319402, '2008-10-23 05:53:11', 1], [39.984211, 116.319389, '2008-10-23 05:53:16', 1], [39.984217, 116.319422, '2008-10-23 05:53:21', 1], [39.984710, 116.319865, '2008-10-23 05:53:23', 1], [39.984674, 116.319810, '2008-10-23 05:53:28', 1], [39.984623, 116.319773, '2008-10-23 05:53:33', 1], [39.984606, 116.319732, '2008-10-23 05:53:38', 1], [39.984555, 116.319728, '2008-10-23 05:53:43', 1] ] list_data_2 = [ [39.984094, 116.319236, '2008-10-23 05:53:03', 1], [39.984710, 116.319865, '2008-10-23 05:53:13', 1], [39.984710, 116.319865, '2008-10-23 05:53:23', 1], [39.984710, 116.319865, '2008-10-23 05:53:33', 1], [39.984710, 116.319865, '2008-10-23 05:53:43', 1], [39.984674, 116.319810, '2008-10-23 05:53:53', 1], [39.984710, 116.319865, '2008-10-23 05:54:03', 1], [39.984710, 116.319865, '2008-10-23 05:54:13', 1], [39.984710, 116.319865, '2008-10-23 05:54:23', 1], [39.984555, 116.319728, '2008-10-23 05:54:33', 1] ] def _default_move_df(data=None): if data is None: data = list_data return MoveDataFrame( data=data, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME, traj_id=TRAJ_ID, ) def test_end_create_operation(): move_df = _default_move_df() expected = DataFrame( data=[ [39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1], [39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1], [39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 1], [39.984211, 116.319389, Timestamp('2008-10-23 05:53:16'), 1], [39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 1], [39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 1], [39.984674, 116.319810, Timestamp('2008-10-23 
05:53:28'), 1], [39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 1], [39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 1], [39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 1] ], columns=['lat', 'lon', 'datetime', 'id'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) new_move_df = semantic._end_create_operation(move_df, 'lat', False) assert_frame_equal(new_move_df, expected) semantic._end_create_operation(move_df, 'lat', True) assert_frame_equal(move_df, expected) def test_process_simple_filter(): move_df = _default_move_df() expected = DataFrame( data=[ [39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, False], [39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, True], [39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 1, True], [39.984211, 116.319389, Timestamp('2008-10-23 05:53:16'), 1, True], [39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 1, True], [39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 1, True], [39.984674, 116.319810, Timestamp('2008-10-23 05:53:28'), 1, True], [39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 1, True], [39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 1, True], [39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 1, True] ], columns=['lat', 'lon', 'datetime', 'id', 'new_label'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) new_move_df = semantic._process_simple_filter(move_df, 'new_label', 'lat', 39.984217, False ) assert_frame_equal(new_move_df, expected) semantic._process_simple_filter(move_df, 'new_label', 'lat', 39.984217, True) assert_frame_equal(move_df, expected) def test_create_or_update_out_of_the_bbox(): bbox = (39.984217, 116.319236, 39.98471, 116.319865) move_df = _default_move_df() expected = DataFrame( data=[ [39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, True], [39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, True], [39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 1, False], [39.984211, 116.319389, 
Timestamp('2008-10-23 05:53:16'), 1, True], [39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 1, False], [39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 1, False], [39.984674, 116.319810, Timestamp('2008-10-23 05:53:28'), 1, False], [39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 1, False], [39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 1, False], [39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 1, False] ], columns=['lat', 'lon', 'datetime', 'id', 'out_bbox'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) semantic.create_or_update_out_of_the_bbox(move_df, bbox) assert_frame_equal(move_df, expected) def test_create_or_update_gps_deactivated_signal(): move_df = _default_move_df() expected = DataFrame( data=[ [1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), nan, 1.0, nan, False], [1, 39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1.0, 5.0, 6.0, True], [1, 39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 5.0, 5.0, 10.0, True], [1, 39.984211, 116.319389, Timestamp('2008-10-23 05:53:16'), 5.0, 5.0, 10.0, True], [1, 39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 5.0, 2.0, 7.0, True], [1, 39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 2.0, 5.0, 7.0, True], [1, 39.984674, 116.319810, Timestamp('2008-10-23 05:53:28'), 5.0, 5.0, 10.0, True], [1, 39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 5.0, 5.0, 10.0, True], [1, 39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 5.0, 5.0, 10.0, True], [1, 39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 5.0, nan, nan, True] ], columns=['id', 'lat', 'lon', 'datetime', 'time_to_prev', 'time_to_next', 'time_prev_to_next', 'deactivated_signal'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) new_move_df = semantic.create_or_update_gps_deactivated_signal( move_df, max_time_between_adj_points=5.0, inplace=False) assert_frame_equal(new_move_df, expected) semantic.create_or_update_gps_deactivated_signal(move_df, 
max_time_between_adj_points=5.0) assert_frame_equal(move_df, expected) def test_create_or_update_gps_jump(): move_df = _default_move_df() expected = DataFrame( data=[ [1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), nan, 13.690153, nan, True], [1, 39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 13.690153, 7.403788, 20.223428, True], [1, 39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 7.403788, 1.821083, 5.888579, True], [1, 39.984211, 116.319389, Timestamp('2008-10-23 05:53:16'), 1.821083, 2.889671, 1.873356, False], [1, 39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 2.889671, 66.555997, 68.727260, True], [1, 39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 66.555997, 6.162987, 60.622358, True], [1, 39.984674, 116.319810, Timestamp('2008-10-23 05:53:28'), 6.162987, 6.488225, 12.450907, True], [1, 39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 6.488225, 3.971848, 10.066577, True], [1, 39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 3.971848, 5.681172, 8.477733, True], [1, 39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 5.681172, nan, nan, True] ], columns=['id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'dist_to_next', 'dist_prev_to_next', 'gps_jump'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) new_move_df = semantic.create_or_update_gps_jump(move_df, max_dist_between_adj_points=5.0, inplace=False) assert_frame_equal(new_move_df, expected) semantic.create_or_update_gps_jump(move_df, max_dist_between_adj_points=5.0) assert_frame_equal(move_df, expected) def test_create_or_update_short_trajectory(): move_df = _default_move_df() move_df.at[[6, 7, 8, 9], 'id'] = 2 expected = DataFrame( data=[ [1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), nan, nan, nan, 1, False], [1, 39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 13.690153, 1.0, 13.690153, 1, False], [1, 39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 7.403788, 5.0, 1.480758, 1, False], [1, 39.984211, 116.319389, 
Timestamp('2008-10-23 05:53:16'), 1.821083, 5.0, 0.364217, 1, False], [1, 39.984217, 116.319422, Timestamp('2008-10-23 05:53:21'), 2.889671, 5.0, 0.577934, 1, False], [1, 39.984710, 116.319865, Timestamp('2008-10-23 05:53:23'), 66.555997, 2.0, 33.277998, 1, False], [2, 39.984674, 116.319810, Timestamp('2008-10-23 05:53:28'), nan, nan, nan, 2, True], [2, 39.984623, 116.319773, Timestamp('2008-10-23 05:53:33'), 6.488225, 5.0, 1.297645, 2, True], [2, 39.984606, 116.319732, Timestamp('2008-10-23 05:53:38'), 3.971848, 5.0, 0.794370, 2, True], [2, 39.984555, 116.319728, Timestamp('2008-10-23 05:53:43'), 5.681172, 5.0, 1.136234, 2, True] ], columns=['id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'tid_part', 'short_traj'], index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) new_move_df = semantic.create_or_update_short_trajectory(move_df, k_segment_max=4, inplace=False) assert_frame_equal(new_move_df, expected) assert ('short_traj' not in move_df) semantic.create_or_update_short_trajectory(move_df, k_segment_max=4) assert_frame_equal(move_df, expected) def test_create_or_update_gps_block_signal(): move_df = _default_move_df(list_data_2) cols = [ 'tid_part', 'id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'block_signal' ] expected = DataFrame(data=[ [1, 1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:03'), nan, nan, nan, False], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:13'), nan, nan, nan, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:23'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:33'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:43'), 0.0, 10.0, 0.0, True], [3, 1, 39.984674, 116.31981, Timestamp('2008-10-23 05:53:53'), nan, nan, nan, False], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:03'), nan, nan, nan, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:13'), 0.0, 10.0, 0.0, 
True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:23'), 0.0, 10.0, 0.0, True], [5, 1, 39.984555, 116.319728, Timestamp('2008-10-23 05:54:33'), nan, nan, nan, False], ], columns=cols, index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ) new_move_df = semantic.create_or_update_gps_block_signal( move_df, max_time_stop=15, inplace=False ) assert_frame_equal(new_move_df, expected) assert BLOCK not in move_df semantic.create_or_update_gps_block_signal(move_df, max_time_stop=15) assert_frame_equal(move_df, expected) def test_filter_block_signal_by_repeated_amount_of_points(): move_df = _default_move_df(list_data_2) cols = [ 'tid_part', 'id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'block_signal' ] expected = DataFrame(data=[ [1, 1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:03'), nan, nan, nan, False], [3, 1, 39.984674, 116.31981, Timestamp('2008-10-23 05:53:53'), nan, nan, nan, False], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:03'), nan, nan, nan, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:13'), 0.0, 10.0, 0.0, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:23'), 0.0, 10.0, 0.0, True], [5, 1, 39.984555, 116.319728, Timestamp('2008-10-23 05:54:33'), nan, nan, nan, False], ], columns=cols, index=[0, 5, 6, 7, 8, 9] ) new_move_df = semantic.filter_block_signal_by_repeated_amount_of_points( move_df, max_time_stop=15, amount_max_of_points_stop=3, inplace=False ) assert_frame_equal(new_move_df, expected) assert BLOCK not in move_df semantic.filter_block_signal_by_repeated_amount_of_points( move_df, max_time_stop=15, amount_max_of_points_stop=3, filter_out=True, inplace=True ) expected = DataFrame(data=[ [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:13'), nan, nan, nan, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:23'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:33'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, 
Timestamp('2008-10-23 05:53:43'), 0.0, 10.0, 0.0, True], ], columns=cols, index=[1, 2, 3, 4] ) assert_frame_equal(move_df, expected) def test_filter_block_signal_by_time(): move_df = _default_move_df(list_data_2) cols = [ 'tid_part', 'id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'block_signal' ] expected = DataFrame(data=[ [1, 1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:03'), nan, nan, nan, False], [3, 1, 39.984674, 116.31981, Timestamp('2008-10-23 05:53:53'), nan, nan, nan, False], [5, 1, 39.984555, 116.319728, Timestamp('2008-10-23 05:54:33'), nan, nan, nan, False], ], columns=cols, index=[0, 5, 9] ) new_move_df = semantic.filter_block_signal_by_time( move_df, max_time_stop=15, inplace=False ) # assert False assert_frame_equal(new_move_df, expected) assert BLOCK not in move_df semantic.filter_block_signal_by_time( move_df, max_time_stop=15, filter_out=True, inplace=True ) expected = DataFrame(data=[ [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:13'), nan, nan, nan, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:23'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:33'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:43'), 0.0, 10.0, 0.0, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:03'), nan, nan, nan, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:13'), 0.0, 10.0, 0.0, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:23'), 0.0, 10.0, 0.0, True], ], columns=cols, index=[1, 2, 3, 4, 6, 7, 8] ) assert_frame_equal(move_df, expected) def test_filter_longer_time_to_stop_segment_by_id(): move_df = _default_move_df(list_data_2) cols = [ 'segment_stop', 'id', 'lat', 'lon', 'datetime', 'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'stop' ] expected = DataFrame(data=[ [1, 1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:03'), nan, nan, nan, False], [3, 1, 39.984674, 116.31981, 
Timestamp('2008-10-23 05:53:53'), nan, nan, nan, False], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:03'), nan, nan, nan, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:13'), 0.0, 10.0, 0.0, True], [4, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:54:23'), 0.0, 10.0, 0.0, True], [5, 1, 39.984555, 116.319728, Timestamp('2008-10-23 05:54:33'), nan, nan, nan, False], ], columns=cols, index=[0, 5, 6, 7, 8, 9] ) new_move_df = semantic.filter_longer_time_to_stop_segment_by_id( move_df, dist_radius=5, time_radius=10, inplace=False ) assert_frame_equal(new_move_df, expected) assert SEGMENT_STOP not in move_df expected = DataFrame(data=[ [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:13'), nan, nan, nan, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:23'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:33'), 0.0, 10.0, 0.0, True], [2, 1, 39.98471, 116.319865, Timestamp('2008-10-23 05:53:43'), 0.0, 10.0, 0.0, True], ], columns=cols, index=[1, 2, 3, 4] ) semantic.filter_longer_time_to_stop_segment_by_id( move_df, dist_radius=5, time_radius=10, filter_out=True, inplace=True ) assert_frame_equal(move_df, expected)
39.626305
86
0.552026
2,912
18,981
3.45261
0.056662
0.071613
0.095484
0.119355
0.905411
0.890591
0.889198
0.873085
0.865725
0.827432
0
0.334236
0.280122
18,981
478
87
39.709205
0.401566
0.000632
0
0.53202
0
0
0.152167
0
0
0
0
0
0.061576
1
0.027094
false
0
0.012315
0
0.041872
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
0ebf3ba7a7f17a15f702da54be665980146f4e40
262
py
Python
Sources/Workflows/Command-Help/alp/__init__.py
yagosys/AlfredWorkflow.com
9e5087e61fb89640a7a6ca89ba554303aec0b037
[ "MIT" ]
2,177
2015-01-02T09:56:51.000Z
2022-03-27T01:48:37.000Z
Sources/Workflows/Command-Help/alp/__init__.py
yagosys/AlfredWorkflow.com
9e5087e61fb89640a7a6ca89ba554303aec0b037
[ "MIT" ]
24
2015-01-02T19:11:51.000Z
2021-01-27T07:20:33.000Z
Sources/Workflows/Command-Help/alp/__init__.py
yagosys/AlfredWorkflow.com
9e5087e61fb89640a7a6ca89ba554303aec0b037
[ "MIT" ]
516
2015-01-02T18:48:29.000Z
2022-01-26T07:12:35.000Z
from .core import * try: from .item import * from .keychain import * from .settings import * from .mail import * from alp.request.request import * except ImportError: pass try: from .notification import * except ImportError: pass
18.714286
37
0.667939
31
262
5.645161
0.451613
0.228571
0.262857
0.308571
0
0
0
0
0
0
0
0
0.259542
262
13
38
20.153846
0.902062
0
0
0.461538
0
0
0
0
0
0
0
0
0
1
0
true
0.153846
0.692308
0
0.692308
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
7
0ed0dd673451109629565023694e7e4eff651b32
452,672
py
Python
tests/rsc/schemas/input_json_schemas_shopify.py
hotgluexyz/target-bigquery
8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f
[ "BSD-3-Clause" ]
10
2020-09-28T15:12:17.000Z
2021-12-03T12:39:23.000Z
tests/rsc/schemas/input_json_schemas_shopify.py
hotgluexyz/target-bigquery
8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f
[ "BSD-3-Clause" ]
26
2021-01-04T14:01:07.000Z
2022-03-27T22:53:34.000Z
tests/rsc/schemas/input_json_schemas_shopify.py
hotgluexyz/target-bigquery
8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f
[ "BSD-3-Clause" ]
39
2020-10-01T18:16:20.000Z
2022-03-11T16:14:41.000Z
""" shopify_orders_malformed: this schema is from here: https://bitbucket.org/analyticspros/dt-singerio-shopify/commits/ it has three instances of this: , { "properties": {}, "type": [ "null", "object" ] } These instances are inside anyOf. it's breaking the pipeline, if it's branch feature/schema-translation it's running fine if it's master branch This is incorrect schema because: 1) it has empty properties 2) if you remove this bit, new method schema conversion (simplify and convert) runs. No data loss happens. No schema change happens. So therefore, this bit doesn't really add anything. 3) Simplification step taken from target-postgres should remove all instances of anyOf. This anyOf persists. Ths is not normal, anyOf should be removed in this case. """ shopify_orders_malformed = """{"type":"SCHEMA", "stream": "orders", "tap_stream_id": "orders", "schema": { "properties": { "presentment_currency": { "type": [ "null", "string" ] }, "subtotal_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_discounts_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_line_items_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": 
{ "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_shipping_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_tax_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "line_items": { "items": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ 
"null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "integer" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "integer" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, { "properties": {}, "type": [ "null", "object" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { 
"type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, 
"city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "processing_method": { "type": [ "null", "string" ] }, "order_number": { "type": [ "null", "string" ] }, "confirmed": { "type": [ "null", "boolean" ] }, "total_discounts": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "total_line_items_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "order_adjustments": { "items": { "properties": { "order_id": { "type": [ "null", "string" ] }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "refund_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "kind": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "reason": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "shipping_lines": { "items": { 
"properties": { "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "phone": { "type": [ "null", "string" ] }, "discounted_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", 
"object" ] }, "type": [ "null", "array" ] }, "delivery_category": { "type": [ "null", "string" ] }, "discounted_price": { "type": [ "null", "number" ] }, "code": { "type": [ "null", "string" ] }, "requested_fulfillment_service_id": { "type": [ "null", "string" ] }, "carrier_identifier": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "source": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "device_id": { "type": [ "null", "string" ] }, "cancel_reason": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "payment_gateway_names": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "source_identifier": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "processed_at": { "type": [ "null", "string" ], "format": "date-time" }, "referring_site": { "type": [ "null", "string" ] }, "contact_email": { "type": [ "null", "string" ] }, "location_id": { "type": [ "null", "string" ] }, "fulfillments": { "items": { "properties": { "location_id": { "type": [ "null", "string" ] }, "receipt": { "type": [ "null", "object" ], "properties": { "testcase": { "type": [ "null", "boolean" ] }, "authorization": { "type": [ "null", "string" ] } } }, "tracking_number": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "shipment_status": { "type": [ "null", "string" ] }, "line_items": { "items": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": 
[ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "string" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, { "properties": {}, "type": [ "null", "object" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, 
"amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, 
"name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "tracking_url": { "type": [ "null", "string" ] }, "service": { "type": [ "null", "string" ] }, "status": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "tracking_urls": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "tracking_numbers": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "id": { "type": [ "null", "string" ] }, "tracking_company": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "customer": { "type": "object", "properties": { "last_order_name": { 
"type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "email": { "type": [ "null", "string" ] }, "multipass_identifier": { "type": [ "null", "string" ] }, "default_address": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } }, "orders_count": { "type": [ "null", "integer" ] }, "state": { "type": [ "null", "string" ] }, "verified_email": { "type": [ "null", "boolean" ] }, "total_spent": { "type": [ "null", "string" ] }, "last_order_id": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "note": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "addresses": { "type": [ "null", "array" ], "items": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, 
"first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } } }, "last_name": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "tax_exempt": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "accepts_marketing": { "type": [ "null", "boolean" ] }, "accepts_marketing_updated_at": { "type": [ "string", "null" ], "format": "date-time" }, "created_at": { "type": [ "null", "string" ], "format": "date-time" } } }, "test": { "type": [ "null", "boolean" ] }, "total_tax": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "payment_details": { "properties": { "avs_result_code": { "type": [ "null", "string" ] }, "credit_card_company": { "type": [ "null", "string" ] }, "cvv_result_code": { "type": [ "null", "string" ] }, "credit_card_bin": { "type": [ "null", "string" ] }, "credit_card_number": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "number": { "type": [ "null", "integer" ] }, "email": { "type": [ "null", "string" ] }, "source_name": { "type": [ "null", "string" ] }, "landing_site_ref": { "type": [ "null", "string" ] }, "shipping_address": { "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] }, "address2": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", 
"string" ] }, "latitude": { "type": [ "null", "number" ] }, "country_code": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "total_price_usd": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "closed_at": { "type": [ "null", "string" ], "format": "date-time" }, "discount_applications": { "items": { "properties": { "target_type": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "description": { "type": [ "null", "string" ] }, "type": { "type": [ "null", "string" ] }, "target_selection": { "type": [ "null", "string" ] }, "allocation_method": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "value_type": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "name": { "type": [ "null", "string" ] }, "note": { "type": [ "null", "string" ] }, "user_id": { "type": [ "null", "string" ] }, "source_url": { "type": [ "null", "string" ] }, "subtotal_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "billing_address": { "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] }, "address2": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] }, "latitude": { "type": [ "null", "number" ] }, "country_code": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "landing_site": { "type": [ "null", "string" ] }, "taxes_included": { 
"type": [ "null", "boolean" ] }, "token": { "type": [ "null", "string" ] }, "app_id": { "type": [ "null", "string" ] }, "total_tip_received": { "type": [ "null", "number" ] }, "browser_ip": { "type": [ "null", "string" ] }, "discount_codes": { "items": { "properties": { "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "phone": { "type": [ "null", "string" ] }, "note_attributes": { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "order_status_url": { "type": [ "null", "string" ] }, "client_details": { "properties": { "session_hash": { "type": [ "null", "string" ] }, "accept_language": { "type": [ "null", "string" ] }, "browser_width": { "type": [ "null", "integer" ] }, "user_agent": { "type": [ "null", "string" ] }, "browser_ip": { "type": [ "null", "string" ] }, "browser_height": { 
"type": [ "null", "integer" ] } }, "type": [ "null", "object" ] }, "buyer_accepts_marketing": { "type": [ "null", "boolean" ] }, "checkout_token": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "financial_status": { "type": [ "null", "string" ] }, "customer_locale": { "type": [ "null", "string" ] }, "checkout_id": { "type": [ "null", "string" ] }, "total_weight": { "type": [ "null", "integer" ] }, "gateway": { "type": [ "null", "string" ] }, "cart_token": { "type": [ "null", "string" ] }, "cancelled_at": { "type": [ "null", "string" ], "format": "date-time" }, "refunds": { "items": { "properties": { "admin_graphql_api_id": { "type": [ "null", "string" ] }, "refund_line_items": { "items": { "properties": { "line_item": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { 
"type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "string" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, { "properties": {}, "type": [ "null", "object" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { 
"type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", 
"string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "location_id": { "type": [ "null", "string" ] }, "line_item_id": { "type": [ "null", "string" ] }, "quantity": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "total_tax": { "type": [ "null", "number" ] }, "restock_type": { "type": [ "null", "string" ] }, "subtotal": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "restock": { "type": [ "null", "boolean" ] }, "note": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "user_id": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "processed_at": { "type": [ "null", "string" ], "format": "date-time" }, "order_adjustments": { "items": { "properties": { "order_id": { "type": [ "null", "string" ] }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "refund_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "kind": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "reason": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "reference": { "type": [ "null", "string" ] } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], 
"forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "presentment_currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "subtotal_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_discounts_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_line_items_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_shipping_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tax_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "line_items" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "processing_method" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_number" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "confirmed" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_discounts" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_line_items_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_adjustments" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ 
"properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "device_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cancel_reason" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "payment_gateway_names" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_identifier" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "processed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "referring_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "contact_email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "location_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "fulfillments" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "test" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tax" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "payment_details" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "number" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", 
"source_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "landing_site_ref" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price_usd" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "closed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "discount_applications" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "user_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "subtotal_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "billing_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "landing_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "taxes_included" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "app_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tip_received" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "browser_ip" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ 
"properties", "discount_codes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tax_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "phone" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note_attributes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "fulfillment_status" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_status_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "client_details" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "buyer_accepts_marketing" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "checkout_token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tags" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "financial_status" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer_locale" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "checkout_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_weight" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "gateway" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cart_token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cancelled_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "refunds" ], "metadata": { "inclusion": "available", "selected": true 
} }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "reference" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" # removed the object/dict with empty properties shopify_orders_fixed = """ {"type":"SCHEMA", "stream": "orders", "tap_stream_id": "orders", "schema": { "properties": { "presentment_currency": { "type": [ "null", "string" ] }, "subtotal_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_discounts_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_line_items_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { 
"type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_shipping_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_tax_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "line_items": { "items": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { 
"currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "integer" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "integer" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", 
"number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { 
"type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "processing_method": { "type": [ "null", "string" ] }, "order_number": { "type": [ "null", "string" ] }, "confirmed": { "type": [ "null", "boolean" ] }, "total_discounts": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "total_line_items_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "order_adjustments": { "items": { "properties": { "order_id": { "type": [ "null", "string" ] }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "refund_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "kind": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "reason": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "shipping_lines": { "items": { "properties": { "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { 
"type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "phone": { "type": [ "null", "string" ] }, "discounted_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "delivery_category": { "type": [ "null", "string" ] }, "discounted_price": { "type": [ "null", "number" ] }, "code": { "type": [ "null", "string" ] }, "requested_fulfillment_service_id": { "type": [ "null", "string" ] }, "carrier_identifier": { "type": [ "null", "string" ] }, "id": { "type": [ "null", 
"string" ] }, "source": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "device_id": { "type": [ "null", "string" ] }, "cancel_reason": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "payment_gateway_names": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "source_identifier": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "processed_at": { "type": [ "null", "string" ], "format": "date-time" }, "referring_site": { "type": [ "null", "string" ] }, "contact_email": { "type": [ "null", "string" ] }, "location_id": { "type": [ "null", "string" ] }, "fulfillments": { "items": { "properties": { "location_id": { "type": [ "null", "string" ] }, "receipt": { "type": [ "null", "object" ], "properties": { "testcase": { "type": [ "null", "boolean" ] }, "authorization": { "type": [ "null", "string" ] } } }, "tracking_number": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "shipment_status": { "type": [ "null", "string" ] }, "line_items": { "items": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { 
"shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "string" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, 
"amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 
1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "tracking_url": { "type": [ "null", "string" ] }, "service": { "type": [ "null", "string" ] }, "status": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "tracking_urls": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "tracking_numbers": { "items": { "type": [ "null", "string" ] }, "type": [ "null", "array" ] }, "id": { "type": [ "null", "string" ] }, "tracking_company": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "customer": { "type": "object", "properties": { "last_order_name": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "email": { "type": [ "null", "string" ] }, "multipass_identifier": { "type": [ "null", "string" ] }, "default_address": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ 
"null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } }, "orders_count": { "type": [ "null", "integer" ] }, "state": { "type": [ "null", "string" ] }, "verified_email": { "type": [ "null", "boolean" ] }, "total_spent": { "type": [ "null", "string" ] }, "last_order_id": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "note": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "addresses": { "type": [ "null", "array" ], "items": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ 
"null", "string" ] } } } }, "last_name": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "tax_exempt": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "accepts_marketing": { "type": [ "null", "boolean" ] }, "accepts_marketing_updated_at": { "type": [ "string", "null" ], "format": "date-time" }, "created_at": { "type": [ "null", "string" ], "format": "date-time" } } }, "test": { "type": [ "null", "boolean" ] }, "total_tax": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "payment_details": { "properties": { "avs_result_code": { "type": [ "null", "string" ] }, "credit_card_company": { "type": [ "null", "string" ] }, "cvv_result_code": { "type": [ "null", "string" ] }, "credit_card_bin": { "type": [ "null", "string" ] }, "credit_card_number": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "number": { "type": [ "null", "integer" ] }, "email": { "type": [ "null", "string" ] }, "source_name": { "type": [ "null", "string" ] }, "landing_site_ref": { "type": [ "null", "string" ] }, "shipping_address": { "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] }, "address2": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] }, "latitude": { "type": [ "null", "number" ] }, "country_code": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "total_price_usd": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "closed_at": { "type": [ "null", "string" ], "format": "date-time" }, "discount_applications": { 
"items": { "properties": { "target_type": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "description": { "type": [ "null", "string" ] }, "type": { "type": [ "null", "string" ] }, "target_selection": { "type": [ "null", "string" ] }, "allocation_method": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "value_type": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "name": { "type": [ "null", "string" ] }, "note": { "type": [ "null", "string" ] }, "user_id": { "type": [ "null", "string" ] }, "source_url": { "type": [ "null", "string" ] }, "subtotal_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "billing_address": { "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] }, "address2": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] }, "latitude": { "type": [ "null", "number" ] }, "country_code": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "landing_site": { "type": [ "null", "string" ] }, "taxes_included": { "type": [ "null", "boolean" ] }, "token": { "type": [ "null", "string" ] }, "app_id": { "type": [ "null", "string" ] }, "total_tip_received": { "type": [ "null", "number" ] }, "browser_ip": { "type": [ "null", "string" ] }, "discount_codes": { "items": { "properties": { "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "type": { "type": [ "null", 
"string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "phone": { "type": [ "null", "string" ] }, "note_attributes": { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "order_status_url": { "type": [ "null", "string" ] }, "client_details": { "properties": { "session_hash": { "type": [ "null", "string" ] }, "accept_language": { "type": [ "null", "string" ] }, "browser_width": { "type": [ "null", "integer" ] }, "user_agent": { "type": [ "null", "string" ] }, "browser_ip": { "type": [ "null", "string" ] }, "browser_height": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] }, "buyer_accepts_marketing": { "type": [ "null", "boolean" ] }, "checkout_token": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "financial_status": { "type": [ "null", "string" ] }, "customer_locale": { "type": [ "null", "string" ] }, "checkout_id": { "type": [ "null", "string" ] }, "total_weight": { "type": [ 
"null", "integer" ] }, "gateway": { "type": [ "null", "string" ] }, "cart_token": { "type": [ "null", "string" ] }, "cancelled_at": { "type": [ "null", "string" ], "format": "date-time" }, "refunds": { "items": { "properties": { "admin_graphql_api_id": { "type": [ "null", "string" ] }, "refund_line_items": { "items": { "properties": { "line_item": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, 
"destination_location_id": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "string" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } 
}, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "location_id": { 
"type": [ "null", "string" ] }, "line_item_id": { "type": [ "null", "string" ] }, "quantity": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "total_tax": { "type": [ "null", "number" ] }, "restock_type": { "type": [ "null", "string" ] }, "subtotal": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "restock": { "type": [ "null", "boolean" ] }, "note": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "user_id": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "processed_at": { "type": [ "null", "string" ], "format": "date-time" }, "order_adjustments": { "items": { "properties": { "order_id": { "type": [ "null", "string" ] }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "refund_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "kind": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "reason": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "reference": { "type": [ "null", "string" ] } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "presentment_currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "subtotal_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_discounts_set" ], "metadata": { "inclusion": "available", "selected": true } }, { 
"breadcrumb": [ "properties", "total_line_items_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_shipping_price_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tax_set" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "line_items" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "processing_method" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_number" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "confirmed" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_discounts" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_line_items_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_adjustments" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "device_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cancel_reason" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", 
"payment_gateway_names" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_identifier" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "processed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "referring_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "contact_email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "location_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "fulfillments" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "test" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tax" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "payment_details" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "number" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "landing_site_ref" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price_usd" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", 
"closed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "discount_applications" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "user_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "subtotal_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "billing_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "landing_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "taxes_included" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "app_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tip_received" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "browser_ip" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "discount_codes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tax_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "phone" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note_attributes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", 
"fulfillment_status" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_status_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "client_details" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "buyer_accepts_marketing" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "checkout_token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tags" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "financial_status" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer_locale" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "checkout_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_weight" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "gateway" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cart_token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cancelled_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "refunds" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "reference" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" } """ """old schema conversion test is 
failing on this one new schema conversion is handling it """ shopify_customers = """{"type":"SCHEMA", "stream": "customers", "tap_stream_id": "customers", "schema": { "type": "object", "properties": { "last_order_name": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "email": { "type": [ "null", "string" ] }, "multipass_identifier": { "type": [ "null", "string" ] }, "default_address": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } }, "orders_count": { "type": [ "null", "integer" ] }, "state": { "type": [ "null", "string" ] }, "verified_email": { "type": [ "null", "boolean" ] }, "total_spent": { "type": [ "null", "string" ] }, "last_order_id": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "note": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "addresses": { "type": [ "null", "array" ], "items": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { 
"type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } } }, "last_name": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "tax_exempt": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "accepts_marketing": { "type": [ "null", "boolean" ] }, "accepts_marketing_updated_at": { "anyOf": [ { "type": "string", "format": "date-time" }, { "type": "string" }, { "type": "null" } ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" } } }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "last_order_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "multipass_identifier" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "default_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "orders_count" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "state" 
], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "verified_email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_spent" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "last_order_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "first_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "note" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "phone" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "addresses" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "last_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tags" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tax_exempt" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "accepts_marketing" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "accepts_marketing_updated_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" shopify_custom_collections = """{"type":"SCHEMA", "stream": "custom_collections", 
"tap_stream_id": "custom_collections", "schema": { "properties": { "handle": { "type": [ "null", "string" ] }, "sort_order": { "type": [ "null", "string" ] }, "body_html": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "published_scope": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "image": { "properties": { "alt": { "type": [ "null", "string" ] }, "src": { "type": [ "null", "string" ] }, "width": { "type": [ "null", "integer" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "height": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] }, "published_at": { "type": [ "null", "string" ], "format": "date-time" }, "template_suffix": { "type": [ "null", "string" ] } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "handle" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "sort_order" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "body_html" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "title" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "published_scope" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "image" ], 
"metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "published_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "template_suffix" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL"} """ """ shopify_abandoned_checkouts_malformed it also has empty properties , { "properties": {}, "type": [ "null", "object" ] } """ shopify_abandoned_checkouts_fixed = """{"type":"SCHEMA", "stream": "abandoned_checkouts", "tap_stream_id": "abandoned_checkouts", "schema": { "type": "object", "properties": { "note_attributes": { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "location_id": { "type": [ "null", "string" ] }, "buyer_accepts_marketing": { "type": [ "null", "boolean" ] }, "currency": { "type": [ "null", "string" ] }, "completed_at": { "type": [ "null", "string" ], "format": "date-time" }, "token": { "type": [ "null", "string" ] }, "billing_address": { "type": [ "null", "object" ], "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "latitude": { "type": [ "null", "number" ] }, "zip": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] } } }, "email": { "type": [ "null", "string" ] }, "discount_codes": { "type": [ 
"null", "array" ], "items": { "type": [ "null", "object" ], "properties": { "type": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "code": { "type": [ "null", "string" ] } } } }, "customer_locale": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "gateway": { "type": [ "null", "string" ] }, "referring_site": { "type": [ "null", "string" ] }, "source_identifier": { "type": [ "null", "string" ] }, "total_weight": { "type": [ "null", "integer" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "total_line_items_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "closed_at": { "type": [ "null", "string" ], "format": "date-time" }, "device_id": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "source_name": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "total_tax": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "subtotal_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, 
"line_items": { "items": { "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "grams": { "type": [ "null", "integer" ] }, "compare_at_price": { "type": [ "null", "number" ] }, "destination_location_id": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "line_price": { "type": [ "null", "string" ] }, "origin_location_id": { "type": [ "null", "string" ] }, "applied_discount": { "type": [ "null", "integer" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, 
"variant_title": { "type": [ "null", "string" ] }, "properties": { "anyOf": [ { "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] } ] }, "tax_code": { "type": [ "null", "string" ] }, "discount_allocations": { "items": { "properties": { "discount_application_index": { "type": [ "null", "integer" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "pre_tax_price": { "type": [ "null", "number" ] }, "sku": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "total_discount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "name": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "gift_card": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": 
[ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "origin_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requires_shipping": { "type": [ "null", "boolean" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "variant_inventory_management": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "destination_location": { "properties": { "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "quantity": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "variant_id": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "source_url": { "type": [ "null", "string" ] }, "total_discounts": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "note": { "type": [ "null", "string" ] }, "presentment_currency": { "type": [ "null", "string" ] }, "shipping_lines": { "type": [ "null", "array" ], "items": { "type": [ "null", "object" 
], "properties": { "applied_discounts": { "type": [ "null", "array" ], "items": { "properties": { "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] }, "savings": { "type": [ "null", "number" ] }, "type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "custom_tax_lines": { "items": { "properties": { "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { "type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "phone": { "type": [ "null", "string" ] }, "validation_context": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "carrier_identifier": { "type": [ "null", "string" ] }, "api_client_id": { "type": [ "null", "string" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "requested_fulfillment_service_id": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "code": { "type": [ "null", "string" ] }, "tax_lines": { "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "compare_at": { "type": [ "null", "number" ] }, "position": { "type": [ "null", "integer" ] }, "source": { 
"type": [ "null", "string" ] }, "zone": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "carrier_service_id": { "type": [ "null", "string" ] }, "delivery_category": { "type": [ "null", "string" ] }, "markup": { "type": [ "null", "string" ] }, "source": { "type": [ "null", "string" ] } } } }, "user_id": { "type": [ "null", "string" ] }, "source": { "type": [ "null", "string" ] }, "shipping_address": { "type": [ "null", "object" ], "properties": { "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "latitude": { "type": [ "null", "number" ] }, "zip": { "type": [ "null", "string" ] }, "last_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "city": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "longitude": { "type": [ "null", "number" ] } } }, "abandoned_checkout_url": { "type": [ "null", "string" ] }, "landing_site": { "type": [ "null", "string" ] }, "customer": { "type": "object", "properties": { "last_order_name": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "email": { "type": [ "null", "string" ] }, "multipass_identifier": { "type": [ "null", "string" ] }, "default_address": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ 
"null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } }, "orders_count": { "type": [ "null", "integer" ] }, "state": { "type": [ "null", "string" ] }, "verified_email": { "type": [ "null", "boolean" ] }, "total_spent": { "type": [ "null", "string" ] }, "last_order_id": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "note": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "addresses": { "type": [ "null", "array" ], "items": { "type": [ "null", "object" ], "properties": { "city": { "type": [ "null", "string" ] }, "address1": { "type": [ "null", "string" ] }, "zip": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "country_name": { "type": [ "null", "string" ] }, "province": { "type": [ "null", "string" ] }, "phone": { "type": [ "null", "string" ] }, "country": { "type": [ "null", "string" ] }, "first_name": { "type": [ "null", "string" ] }, "customer_id": { "type": [ "null", "string" ] }, "default": { "type": [ "null", "boolean" ] }, "last_name": { "type": [ "null", "string" ] }, "country_code": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "province_code": { "type": [ "null", "string" ] }, "address2": { "type": [ "null", "string" ] }, "company": { "type": [ "null", "string" ] } } } }, "last_name": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "tax_exempt": { "type": [ "null", "boolean" ] }, "id": { "type": [ "null", "string" ] }, "accepts_marketing": { 
"type": [ "null", "boolean" ] }, "accepts_marketing_updated_at": { "anyOf": [ { "type": "string", "format": "date-time" }, { "type": "string" }, { "type": "null" } ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" } } }, "total_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "cart_token": { "type": [ "null", "string" ] }, "taxes_included": { "type": [ "null", "boolean" ] } } }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "note_attributes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "location_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "buyer_accepts_marketing" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "completed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "billing_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "email" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "discount_codes" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer_locale" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "gateway" ], "metadata": { 
"inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "referring_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_identifier" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_weight" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tax_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_line_items_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "closed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "device_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "phone" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_tax" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "subtotal_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "line_items" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_discounts" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "presentment_currency" ], "metadata": { "inclusion": 
"available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_lines" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "user_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "shipping_address" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "abandoned_checkout_url" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "landing_site" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "customer" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "total_price" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "cart_token" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "taxes_included" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" shopify_products = """{"type":"SCHEMA", "stream": "products", "tap_stream_id": "products", "schema": { "properties": { "published_at": { "type": [ "null", "string" ], "format": "date-time" }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "published_scope": { "type": [ "null", "string" ] }, "vendor": { "type": [ "null", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "body_html": { "type": [ "null", "string" ] }, "product_type": { "type": [ "null", "string" ] }, "tags": { "type": [ "null", "string" ] }, "options": { "type": [ "null", "array" ], "items": { "properties": { "name": { "type": [ "null", "string" ] }, "product_id": { "type": [ "null", "string" ] }, "values": { 
"type": [ "null", "array" ], "items": { "type": [ "null", "string" ] } }, "id": { "type": [ "null", "string" ] }, "position": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] } }, "image": { "properties": { "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "variant_ids": { "type": [ "null", "array" ], "items": { "type": [ "null", "string" ] } }, "height": { "type": [ "null", "integer" ] }, "alt": { "type": [ "null", "string" ] }, "src": { "type": [ "null", "string" ] }, "position": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "width": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] }, "handle": { "type": [ "null", "string" ] }, "images": { "type": [ "null", "array" ], "items": { "properties": { "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "variant_ids": { "type": [ "null", "array" ], "items": { "type": [ "null", "string" ] } }, "height": { "type": [ "null", "integer" ] }, "alt": { "type": [ "null", "string" ] }, "src": { "type": [ "null", "string" ] }, "position": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "width": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] } }, "template_suffix": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "variants": { "type": [ "null", "array" ], "items": { "properties": { "barcode": { "type": [ "null", "string" ] }, "tax_code": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "weight_unit": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "position": { "type": [ "null", "integer" ] }, "price": { "type": [ "null", 
"number" ], "multipleOf": 1e-10 }, "image_id": { "type": [ "null", "string" ] }, "inventory_policy": { "type": [ "null", "string" ] }, "sku": { "type": [ "null", "string" ] }, "inventory_item_id": { "type": [ "null", "string" ] }, "fulfillment_service": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "weight": { "type": [ "null", "number" ] }, "inventory_management": { "type": [ "null", "string" ] }, "taxable": { "type": [ "null", "boolean" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "option1": { "type": [ "null", "string" ] }, "compare_at_price": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" }, "option2": { "type": [ "null", "string" ] }, "old_inventory_quantity": { "type": [ "null", "integer" ] }, "requires_shipping": { "type": [ "null", "boolean" ] }, "inventory_quantity": { "type": [ "null", "integer" ] }, "grams": { "type": [ "null", "integer" ] }, "option3": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "published_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "published_scope" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "vendor" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "body_html" ], "metadata": { "inclusion": 
"available", "selected": true } }, { "breadcrumb": [ "properties", "product_type" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "tags" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "options" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "image" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "handle" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "images" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "template_suffix" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "title" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "variants" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" shopify_transactions = """{"type":"SCHEMA", "stream": "transactions", "tap_stream_id": "transactions", "schema": { "properties": { "error_code": { "type": [ "null", "string" ] }, "device_id": { "type": [ "null", "string" ] }, "user_id": { "type": [ "null", "string" ] }, "parent_id": { "type": [ "null", "string" ] }, "test": { "type": [ "null", "boolean" ] }, "kind": { "type": [ "null", "string" ] }, "order_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "authorization": { "type": [ "null", "string" ] }, "currency": { "type": [ "null", "string" ] }, "source_name": { "type": [ "null", "string" ] }, "message": { "type": [ 
"null", "string" ] }, "id": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "status": { "type": [ "null", "string" ] }, "payment_details": { "properties": { "cvv_result_code": { "type": [ "null", "string" ] }, "credit_card_bin": { "type": [ "null", "string" ] }, "credit_card_company": { "type": [ "null", "string" ] }, "credit_card_number": { "type": [ "null", "string" ] }, "avs_result_code": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "gateway": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "receipt": { "type": [ "null", "object" ], "properties": { "fee_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "gross_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 } }, "patternProperties": { ".+": {} } }, "location_id": { "type": [ "null", "string" ] } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "created_at" ], "selected": true } }, { "breadcrumb": [ "properties", "error_code" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "device_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "user_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "parent_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "test" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "kind" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "amount" ], "metadata": 
{ "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "authorization" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "currency" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "source_name" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "message" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "status" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "payment_details" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "gateway" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "receipt" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "location_id" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "created_at", "replication_method": "INCREMENTAL" }""" shopify_metafields_malformed = """{"type":"SCHEMA", "stream": "metafields", "tap_stream_id": "metafields", "schema": { "properties": { "owner_id": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "owner_resource": { "type": [ "null", "string" ] }, "value_type": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "id": { "type": [ "null", "string" ] }, "namespace": { "type": [ "null", "string" ] }, "description": { "type": [ 
"null", "string" ] }, "value": { "type": [ "null", "integer", "object", "string" ], "properties": {} }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": false } }, { "breadcrumb": [ "properties", "owner_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "owner_resource" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "value_type" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "key" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "namespace" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "description" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "value" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" """ replaced this : "value": { "type": [ "null", "integer", "object", "string" ], "properties": {} } with this: "value": { "type": [ "null", "integer", "object", "string" ] } removed "properties": {} """ shopify_metafields_fixed = """{"type":"SCHEMA", "stream": "metafields", "tap_stream_id": "metafields", "schema": { "properties": { 
"owner_id": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "owner_resource": { "type": [ "null", "string" ] }, "value_type": { "type": [ "null", "string" ] }, "key": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "id": { "type": [ "null", "string" ] }, "namespace": { "type": [ "null", "string" ] }, "description": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "integer", "object", "string" ] }, "updated_at": { "type": [ "null", "string" ], "format": "date-time" } }, "type": "object" }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": false } }, { "breadcrumb": [ "properties", "owner_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "owner_resource" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "value_type" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "key" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "namespace" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "description" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "value" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } } ], "key_properties": [ "id" ], 
"replication_key": "updated_at", "replication_method": "INCREMENTAL" }""" shopify_order_refunds = """{"type":"SCHEMA", "stream": "order_refunds", "tap_stream_id": "order_refunds", "schema": { "type": "object", "properties": { "order_id": { "type": [ "null", "string" ] }, "restock": { "type": [ "null", "boolean" ] }, "order_adjustments": { "items": { "properties": { "order_id": { "type": [ "null", "string" ] }, "tax_amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "refund_id": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ], "multipleOf": 1e-10 }, "kind": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "reason": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] }, "type": [ "null", "array" ] }, "processed_at": { "type": [ "null", "string" ], "format": "date-time" }, "user_id": { "type": [ "null", "string" ] }, "note": { "type": [ "null", "string" ] }, "id": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "refund_line_items": { "type": [ "null", "array" ], "items": { "properties": { "location_id": { "type": [ "null", "string" ] }, "subtotal_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "total_tax_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" 
] } }, "type": [ "null", "object" ] }, "line_item_id": { "type": [ "null", "string" ] }, "total_tax": { "type": [ "null", "number" ] }, "quantity": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "line_item": { "properties": { "gift_card": { "type": [ "null", "boolean" ] }, "price": { "type": [ "null", "string" ] }, "tax_lines": { "type": [ "null", "array" ], "items": { "properties": { "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "price": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] }, "rate": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "fulfillment_service": { "type": [ "null", "string" ] }, "sku": { "type": [ "null", "string" ] }, "fulfillment_status": { "type": [ "null", "string" ] }, "properties": { "type": [ "null", "array" ], "items": { "properties": { "name": { "type": [ "null", "string" ] }, "value": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } }, "quantity": { "type": [ "null", "integer" ] }, "variant_id": { "type": [ "null", "string" ] }, "grams": { "type": [ "null", "integer" ] }, "requires_shipping": { "type": [ "null", "boolean" ] }, "vendor": { "type": [ "null", "string" ] }, "price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "variant_inventory_management": { "type": [ 
"null", "string" ] }, "pre_tax_price": { "type": [ "null", "string" ] }, "variant_title": { "type": [ "null", "string" ] }, "total_discount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "discount_allocations": { "type": [ "null", "array" ], "items": { "properties": { "amount": { "type": [ "null", "number" ] }, "amount_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "discount_application_index": { "type": [ "null", "integer" ] } }, "type": [ "null", "object" ] } }, "pre_tax_price_set": { "properties": { "shop_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] }, "presentment_money": { "properties": { "currency_code": { "type": [ "null", "string" ] }, "amount": { "type": [ "null", "number" ] } }, "type": [ "null", "object" ] } }, "type": [ "null", "object" ] }, "fulfillable_quantity": { "type": [ "null", "integer" ] }, "id": { "type": [ "null", "string" ] }, "admin_graphql_api_id": { "type": [ "null", "string" ] }, "total_discount": { "type": [ "null", "string" ] }, "name": { "type": [ "null", "string" ] }, "product_exists": { "type": [ "null", "boolean" ] }, "taxable": { "type": [ "null", "boolean" ] }, "product_id": { "type": [ "null", "string" ] }, "title": { "type": [ "null", "string" ] } }, 
"type": [ "null", "object" ] }, "subtotal": { "type": [ "null", "number" ] }, "restock_type": { "type": [ "null", "string" ] } }, "type": [ "null", "object" ] } } } }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "created_at" ], "selected": true } }, { "breadcrumb": [ "properties", "order_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "restock" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "order_adjustments" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "processed_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "user_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "note" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "admin_graphql_api_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "refund_line_items" ], "metadata": { "inclusion": "available", "selected": true } } ], "key_properties": [ "id" ], "replication_key": "created_at", "replication_method": "INCREMENTAL" }""" shopify_collects = """{"type":"SCHEMA", "stream": "collects", "tap_stream_id": "collects", "schema": { "type": "object", "properties": { "id": { "type": [ "null", "string" ] }, "collection_id": { "type": [ "null", "string" ] }, "created_at": { "type": [ "null", "string" ], "format": "date-time" }, "position": { "type": [ "null", "integer" ] }, "product_id": { "type": [ "null", "string" ] }, "sort_value": { "type": [ "null", "string" ] }, "updated_at": { "type": [ 
"null", "string" ], "format": "date-time" } } }, "metadata": [ { "breadcrumb": [], "metadata": { "table-key-properties": [ "id" ], "forced-replication-method": "INCREMENTAL", "valid-replication-keys": [ "updated_at" ], "selected": true } }, { "breadcrumb": [ "properties", "id" ], "metadata": { "inclusion": "automatic" } }, { "breadcrumb": [ "properties", "collection_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "created_at" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "position" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "product_id" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "sort_value" ], "metadata": { "inclusion": "available", "selected": true } }, { "breadcrumb": [ "properties", "updated_at" ], "metadata": { "inclusion": "automatic" } } ], "key_properties": [ "id" ], "replication_key": "updated_at", "replication_method": "INCREMENTAL" }"""
28.565154
99
0.167704
13,005
452,672
5.713725
0.025221
0.209509
0.20706
0.119289
0.966033
0.955361
0.951741
0.936789
0.91445
0.90838
0
0.00256
0.73684
452,672
15,847
100
28.565154
0.621213
0.002136
0
0.675557
0
0
0.999151
0.012802
0
0
0
0
0
1
0
false
0.000318
0
0
0
0
0
0
0
null
1
1
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
0ed51f28ba67a1d02a10a4fc54a5773f0a350f91
58,747
py
Python
hs_core/tests/api/rest/test_resource_science_meta.py
kjlippold/hydroshare
a82c6a3ce2aee3f00d1a1022f7c328e6a610fc3f
[ "BSD-3-Clause" ]
null
null
null
hs_core/tests/api/rest/test_resource_science_meta.py
kjlippold/hydroshare
a82c6a3ce2aee3f00d1a1022f7c328e6a610fc3f
[ "BSD-3-Clause" ]
null
null
null
hs_core/tests/api/rest/test_resource_science_meta.py
kjlippold/hydroshare
a82c6a3ce2aee3f00d1a1022f7c328e6a610fc3f
[ "BSD-3-Clause" ]
null
null
null
from rest_framework import status from hs_core.hydroshare import resource from hs_core.hydroshare.utils import resource_post_create_actions from .base import HSRESTTestCase class TestResourceScienceMetadata(HSRESTTestCase): def setUp(self): super(TestResourceScienceMetadata, self).setUp() self.rtype = 'GenericResource' self.title = 'My Test resource' res = resource.create_resource(self.rtype, self.user, self.title) self.resource = res self.pid = res.short_id self.resources_to_delete.append(self.pid) # create another resource for testing relation metadata another_res = resource.create_resource('GenericResource', self.user, 'My another Test resource') self.pid2 = another_res.short_id self.resources_to_delete.append(self.pid2) def test_get_scimeta(self): # Get the resource system metadata sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid) response = self.client.get(sysmeta_url) self.assertEqual(response.status_code, status.HTTP_200_OK) # content = json.loads(response.content) def test_put_scimeta_generic_resource(self): sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1", "identifiers": {"ORCID": "https://orcid.org/012", "ResearchGateID": "https://www.researchgate.net/002"} }, { "name": None, "organization": "Org 2" }], "creators": [{ "name": "Creator 1", "organization": None }, { "name": "Creator 2", "organization": "USU", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 
} }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "relations": [ { "type": "isCopiedFrom", "value": "https://www.hydroshare.org/resource/{}/".format(self.pid2) }, { "type": "isExecutedBy", "value": "https://www.hydroshare.org/resource/{}/".format(self.pid2) } ], "funding_agencies": [ { "agency_name": "NSF", "award_title": "Cyber Infrastructure", "award_number": "NSF-101-20-6789", "agency_url": "https://www.nsf.gov", }, { "agency_name": "NSF2", "award_title": "Cyber Infrastructure2", "award_number": "NSF-123", "agency_url": "https://www.google.com", } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.assertEqual(self.resource.metadata.dates.all().count(), 3) self.assertEqual(self.resource.metadata.sources.all().count(), 2) self.assertEqual(self.resource.metadata.relations.all().count(), 2) self.assertEqual(self.resource.metadata.funding_agencies.all().count(), 2) self.assertEqual(str(self.resource.metadata.rights), "CCC http://www.hydroshare.org") self.assertEqual(str(self.resource.metadata.language), "fre") self.assertEqual(self.resource.metadata.coverages.all().count(), 1) self.assertEqual(self.resource.metadata.creators.all().count(), 2) self.assertEqual(self.resource.metadata.contributors.all().count(), 2) self.assertEqual(self.resource.metadata.subjects.all().count(), 3) self.assertEqual(str(self.resource.metadata.description), "New Description") self.assertEqual(str(self.resource.metadata.title), "New Title") def test_put_scimeta_generic_resource_double_none(self): sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": 
"subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": None, "organization": "Org 2" }], "creators": [ { "name": "Creator", "organization": None }, { "name": None, "organization": None } ], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_put_scimeta_composite_resource_with_core_metadata(self): # testing bulk metadata update that includes only core metadata # create a composite resource self._create_resource(resource_type="CompositeResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "relations": [ 
{ "type": "isCopiedFrom", "value": "https://www.hydroshare.org/resource/{}/".format(self.pid2) }, { "type": "isExecutedBy", "value": "https://www.hydroshare.org/resource/{}/".format(self.pid2) } ], "funding_agencies": [ { "agency_name": "NSF", "award_title": "Cyber Infrastructure", "award_number": "NSF-101-20-6789", "agency_url": "https://www.nsf.gov", }, { "agency_name": "NSF2", "award_title": "Cyber Infrastructure2", "award_number": "NSF-123", "agency_url": "https://www.google.com", } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.assertEqual(self.resource.metadata.dates.all().count(), 3) self.assertEqual(self.resource.metadata.sources.all().count(), 2) self.assertEqual(self.resource.metadata.relations.all().count(), 2) self.assertEqual(self.resource.metadata.funding_agencies.all().count(), 2) self.assertEqual(str(self.resource.metadata.rights), "CCC http://www.hydroshare.org") self.assertEqual(str(self.resource.metadata.language), "fre") self.assertEqual(self.resource.metadata.creators.all().count(), 1) self.assertEqual(self.resource.metadata.contributors.all().count(), 2) self.assertEqual(self.resource.metadata.subjects.all().count(), 3) self.assertEqual(str(self.resource.metadata.description), "New Description") self.assertEqual(str(self.resource.metadata.title), "New Title") self.resource.delete() def test_put_scimeta_composite_resource_with_core_metadata_and_coverage(self): # testing bulk metadata update with only core metadata that includes coverage metadata # create a composite resource self._create_resource(resource_type="CompositeResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { 
"name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_timeseries_resource_with_core_metadata(self): # testing bulk metadata update that includes only core metadata # create a composite resource self._create_resource(resource_type="TimeSeriesResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def 
test_put_scimeta_timeseries_resource_with_core_metadata_failure(self): # testing bulk metadata update with only core metadata that includes coverage metadata # coverage metadata can't be updated for time series resource - this bulk update should fail # create a composite resource self._create_resource(resource_type="TimeSeriesResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.resource.delete() def test_put_scimeta_netcdf_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a netcdf resource netcdf_file = 'hs_core/tests/data/netcdf_valid.nc' file_to_upload = open(netcdf_file, "r") self._create_resource(resource_type="NetcdfResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New 
Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "originalcoverage": { "value": { "northlimit": '12', "projection": "transverse_mercator", "units": "meter", "southlimit": '10', "eastlimit": '23', "westlimit": '2' }, "projection_string_text": '+proj=tmerc +lon_0=-111.0 +lat_0=0.0 +x_0=500000.0 ' '+y_0=0.0 +k_0=0.9996', "projection_string_type": 'Proj4 String' }, "variables": [ { "name": "SWE", "type": "Float", "shape": "y,x,time", "unit": "m", "missing_value": "-9999", "descriptive_name": "Snow water equivalent", "method": "model simulation of UEB" }, { "name": "x", "unit": "Centimeter" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_netcdf_resource_without_core_metadata(self): # testing bulk metadata update that only updates resource specific metadata # create a netcdf resource netcdf_file = 'hs_core/tests/data/netcdf_valid.nc' file_to_upload = open(netcdf_file, "r") self._create_resource(resource_type="NetcdfResource", file_to_upload=file_to_upload) sysmeta_url = 
"/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "originalcoverage": { "value": { "northlimit": '12', "projection": "transverse_mercator", "units": "meter", "southlimit": '10', "eastlimit": '23', "westlimit": '2' }, "projection_string_text": '+proj=tmerc +lon_0=-111.0 +lat_0=0.0 +x_0=500000.0 ' '+y_0=0.0 +k_0=0.9996', "projection_string_type": 'Proj4 String' }, "variables": [ { "name": "SWE", "type": "Float", "shape": "y,x,time", "unit": "m", "missing_value": "-9999", "descriptive_name": "Snow water equivalent", "method": "model simulation of UEB" }, { "name": "x", "unit": "Centimeter" } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_raster_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update (Note: the only resource specific metadata element that can be updated # is BandInformation) # create a raster resource raster_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(raster_file, "r") self._create_resource(resource_type="RasterResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 
23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "bandinformations": [ {'original_band_name': 'Band_1', 'name': 'Band_2', 'variableName': 'digital elevation', 'variableUnit': 'meter', 'method': 'this is method', 'comment': 'this is comment', 'maximumValue': 1000, 'minimumValue': 0, 'noDataValue': -9999 } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_raster_resource_without_core_metadata(self): # testing bulk metadata update that includes only resource specific # metadata update (Note: the only resource specific metadata element that can be updated # is BandInformation) # create a raster resource raster_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(raster_file, "r") self._create_resource(resource_type="RasterResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "bandinformations": [ {'original_band_name': 'Band_1', 'name': 'Band_2', 'variableName': 'digital elevation', 'variableUnit': 'meter', 'method': 'this is method', 'comment': 'this is comment', 'maximumValue': 1000, 'minimumValue': 0, 'noDataValue': -9999 } ] } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_modelprogram_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a model program resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = 
open(some_file, "r") self._create_resource(resource_type="ModelProgramResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "mpmetadata": { "modelVersion": "5.1.011", "modelProgramLanguage": "Fortran", "modelOperatingSystem": "Windows", "modelReleaseDate": "2016-10-24T21:05:00.315907+00:00", "modelWebsite": "http://www.hydroshare.org", "modelCodeRepository": "http://www.github.com", "modelReleaseNotes": "releaseNote.pdf", "modelDocumentation": "manual.pdf", "modelSoftware": "utilities.exe", "modelEngine": "sourceCode.zip" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_modelprogram_resource_without_core_metadata(self): # testing bulk metadata update that only updates resource specific # metadata # create a model program resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = 
open(some_file, "r") self._create_resource(resource_type="ModelProgramResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "mpmetadata": { "modelVersion": "5.1.011", "modelProgramLanguage": "Fortran", "modelOperatingSystem": "Windows", "modelReleaseDate": "2016-10-24T21:05:00.315907+00:00", "modelWebsite": "http://www.hydroshare.org", "modelCodeRepository": "http://www.github.com", "modelReleaseNotes": "releaseNote.pdf", "modelDocumentation": "manual.pdf", "modelSoftware": "utilities.exe", "modelEngine": "sourceCode.zip" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_modelinstance_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a model instance resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(some_file, "r") self._create_resource(resource_type="ModelInstanceResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", 
"southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "modeloutput": {"includes_output": False}, "executedby": {"model_name": "id of a an existing model program resource"} } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_modelinstance_resource_without_core_metadata(self): # testing bulk metadata update updates only resource specific metadata # create a model instance resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(some_file, "r") self._create_resource(resource_type="ModelInstanceResource", file_to_upload=file_to_upload) # create a model program resource to link as executed by model_program_resource = resource.create_resource( resource_type="ModelProgramResource", owner=self.user, title="A model program resource", files=(file_to_upload,) ) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "modeloutput": {"includes_output": True}, "executedby": {"model_name": model_program_resource.short_id} } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() model_program_resource.delete() def test_put_scimeta_modflowinstance_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a MODFLOW model instance resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(some_file, "r") self._create_resource(resource_type="MODFLOWModelInstanceResource", 
file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "modeloutput": {"includes_output": False}, "executedby": {"model_name": "id of a an existing model program resource"}, "studyarea": { "totalLength": 1111, "totalWidth": 2222, "maximumElevation": 3333, "minimumElevation": 4444 }, "griddimensions": { "numberOfLayers": 5555, "typeOfRows": "Irregular", "numberOfRows": 6666, "typeOfColumns": "Regular", "numberOfColumns": 7777 }, "stressperiod": { "stressPeriodType": "Steady and Transient", "steadyStateValue": 8888, "transientStateValueType": "Monthly", "transientStateValue": 9999 }, "groundwaterflow": { "flowPackage": "LPF", "flowParameter": "Hydraulic Conductivity" }, "boundarycondition": { "specified_head_boundary_packages": ["CHD", "FHB"], "specified_flux_boundary_packages": ["FHB", "WEL"], 
"head_dependent_flux_boundary_packages": ["RIV", "MNW1"] }, "modelcalibration": { "calibratedParameter": "test parameter", "observationType": "test observation type", "observationProcessPackage": "GBOB", "calibrationMethod": "test calibration method" }, "modelinputs": [ { "inputType": "test input type", "inputSourceName": "test source name", "inputSourceURL": "http://www.test.com" } ], "generalelements": { "modelParameter": "test model parameter", "modelSolver": "SIP", "output_control_package": ["HYD", "OC"], "subsidencePackage": "SWT" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_modflowinstance_resource_without_core_metadata(self): # testing bulk metadata update that updates onlt the resource specific # metadata # create a MODFLOW model instance resource some_file = 'hs_core/tests/data/cea.tif' file_to_upload = open(some_file, "r") self._create_resource(resource_type="MODFLOWModelInstanceResource", file_to_upload=file_to_upload) sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "modeloutput": {"includes_output": False}, "executedby": {"model_name": "id of a an existing model program resource"}, "studyarea": { "totalLength": 1111, "totalWidth": 2222, "maximumElevation": 3333, "minimumElevation": 4444 }, "griddimensions": { "numberOfLayers": 5555, "typeOfRows": "Irregular", "numberOfRows": 6666, "typeOfColumns": "Regular", "numberOfColumns": 7777 }, "stressperiod": { "stressPeriodType": "Steady and Transient", "steadyStateValue": 8888, "transientStateValueType": "Monthly", "transientStateValue": 9999 }, "groundwaterflow": { "flowPackage": "LPF", "flowParameter": "Hydraulic Conductivity" }, "boundarycondition": { "specified_head_boundary_packages": ["CHD", "FHB"], "specified_flux_boundary_packages": ["FHB", "WEL"], "head_dependent_flux_boundary_packages": ["RIV", "MNW1"] }, 
"modelcalibration": { "calibratedParameter": "test parameter", "observationType": "test observation type", "observationProcessPackage": "GBOB", "calibrationMethod": "test calibration method" }, "modelinputs": [ { "inputType": "test input type-1", "inputSourceName": "test source name-1", "inputSourceURL": "http://www.test-1.com" }, { "inputType": "test input type-2", "inputSourceName": "test source name-2", "inputSourceURL": "http://www.test-2.com" } ], "generalelements": { "modelParameter": "test model parameter", "modelSolver": "SIP", "output_control_package": ["HYD", "OC"], "subsidencePackage": "SWT" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_script_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a script resource self._create_resource(resource_type="ScriptResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }, { "name": "Test Name 2", "organization": "Org 2" }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", 
"end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "scriptspecificmetadata": { "scriptLanguage": "R", "languageVersion": "3.5", "scriptVersion": "1.0", "scriptDependencies": "None", "scriptReleaseDate": "2015-12-01 00:00", "scriptCodeRepository": "http://www.google.com" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_script_resource_without_core_metadata(self): # testing bulk metadata update for resource specific # metadata only # create a script resource self._create_resource(resource_type="ScriptResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "scriptspecificmetadata": { "scriptLanguage": "R", "languageVersion": "3.5", "scriptVersion": "1.0", "scriptDependencies": "None", "scriptReleaseDate": "2015-12-01 00:00", "scriptCodeRepository": "http://www.google.com" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_SWATModelInstance_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a SWAT model resource self._create_resource(resource_type="SWATModelInstanceResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": 
"https://www.researchgate.net/001"} }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "modeloutput": {"includes_output": False}, "executedby": {"model_name": "id of a an existing model program resource"}, "modelobjective": { "swat_model_objectives": ["BMPs", "Hydrology", "Water quality"], "other_objectives": "some other objectives" }, "simulationtype": { "simulation_type_name": "Normal Simulation" }, "modelmethod": { "runoffCalculationMethod": "A test calculation method", "flowRoutingMethod": "A test flow routing method", "petEstimationMethod": "A test estimation method" }, "modelparameter": { "model_parameters": ["Crop rotation", "Tillage operation"], "other_parameters": "some other model parameters" }, "modelinput": { "warmupPeriodValue": 10, "rainfallTimeStepType": "Daily", "rainfallTimeStepValue": 5, "routingTimeStepType": "Daily", "routingTimeStepValue": 2, "simulationTimeStepType": "Hourly", "simulationTimeStepValue": 1, "watershedArea": 1000, "numberOfSubbasins": 200, "numberOfHRUs": 10000, "demResolution": 30, "demSourceName": "Unknown", "demSourceURL": "http://dem-source.org", "landUseDataSourceName": "Unknown", "landUseDataSourceURL": "http://land-data.org", "soilDataSourceName": "Unknown", "soilDataSourceURL": "http://soil-data.org" } } response = self.client.put(sysmeta_url, put_data, format='json') 
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_scimeta_SWATModelInstance_resource_without_core_metadata(self): # testing bulk metadata update that includes only resource specific # metadata update # create a SWAT model resource self._create_resource(resource_type="SWATModelInstanceResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "modeloutput": {"includes_output": False}, "executedby": {"model_name": "id of a an existing model program resource"}, "modelobjective": { "swat_model_objectives": ["BMPs", "Hydrology", "Water quality"], "other_objectives": "some other objectives" }, "simulationtype": { "simulation_type_name": "Normal Simulation" }, "modelmethod": { "runoffCalculationMethod": "A test calculation method", "flowRoutingMethod": "A test flow routing method", "petEstimationMethod": "A test estimation method" }, "modelparameter": { "model_parameters": ["Crop rotation", "Tillage operation"], "other_parameters": "some other model parameters" }, "modelinput": { "warmupPeriodValue": 10, "rainfallTimeStepType": "Daily", "rainfallTimeStepValue": 5, "routingTimeStepType": "Daily", "routingTimeStepValue": 2, "simulationTimeStepType": "Hourly", "simulationTimeStepValue": 1, "watershedArea": 1000, "numberOfSubbasins": 200, "numberOfHRUs": 10000, "demResolution": 30, "demSourceName": "Unknown", "demSourceURL": "http://dem-source.org", "landUseDataSourceName": "Unknown", "landUseDataSourceURL": "http://land-data.org", "soilDataSourceName": "Unknown", "soilDataSourceURL": "http://soil-data.org" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_web_app_resource_with_core_metadata(self): # testing bulk metadata update that includes both core metadata and resource specific # metadata update # create a web app resource 
self._create_resource(resource_type="ToolResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "title": "New Title", "description": "New Description", "subjects": [ {"value": "subject1"}, {"value": "subject2"}, {"value": "subject3"} ], "contributors": [{ "name": "Test Name 1", "organization": "Org 1" }, { "name": "Test Name 2", "organization": "Org 2", "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "creators": [{ "name": "Creator", "organization": None, "identifiers": {"ORCID": "https://orcid.org/011", "ResearchGateID": "https://www.researchgate.net/001"} }], "coverages": [{ "type": "box", "value": { "northlimit": 43.19716728247476, "projection": "WGS 84 EPSG:4326", "name": "A whole bunch of the atlantic ocean", "units": "Decimal degrees", "southlimit": 23.8858376999, "eastlimit": -19.16015625, "westlimit": -62.75390625 } }], "dates": [ { "type": "valid", "start_date": "2016-12-07T00:00:00Z", "end_date": "2018-12-07T00:00:00Z" } ], "language": "fre", "rights": {"statement": "CCC", "url": "http://www.hydroshare.org"}, "sources": [ { "derived_from": "Source 3" }, { "derived_from": "Source 2" } ], "requesturlbase": { "value": "https://www.google.com" }, "toolversion": { "value": "1.12" }, "supportedrestypes": { "supported_res_types": ["NetcdfResource", "TimeSeriesResource"] }, "supportedsharingstatuses": { "sharing_status": ["Public", "Discoverable"] }, "toolicon": { "value": "https://www.hydroshare.org/static/img/logo-sm.png" }, "apphomepageurl": { "value": "https://mywebapp.com" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def test_put_web_app_resource_without_core_metadata(self): # testing bulk metadata update that includes only resource specific # metadata update # create a web app resource 
self._create_resource(resource_type="ToolResource") sysmeta_url = "/hsapi/resource/{res_id}/scimeta/elements/".format( res_id=self.resource.short_id) put_data = { "requesturlbase": { "value": "https://www.google.com" }, "toolversion": { "value": "1.12" }, "supportedrestypes": { "supported_res_types": ["NetcdfResource", "TimeSeriesResource"] }, "supportedsharingstatuses": { "sharing_status": ["Public", "Discoverable"] }, "toolicon": { "value": "https://www.hydroshare.org/static/img/logo-sm.png" }, "apphomepageurl": { "value": "https://mywebapp.com" } } response = self.client.put(sysmeta_url, put_data, format='json') self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.resource.delete() def _create_resource(self, resource_type, file_to_upload=None): files = () if file_to_upload is not None: files = (file_to_upload,) self.resource = resource.create_resource( resource_type=resource_type, owner=self.user, title="Testing bulk metadata update for resource type - {}".format(resource_type), files=files ) resource_post_create_actions(resource=self.resource, user=self.user, metadata=self.resource.metadata)
40.543133
100
0.477795
4,794
58,747
5.700459
0.09637
0.029859
0.01493
0.012295
0.948331
0.944379
0.936366
0.934133
0.925461
0.914447
0
0.047756
0.395476
58,747
1,448
101
40.571133
0.721744
0.04659
0
0.78518
0
0.001497
0.313112
0.040001
0
0
0
0
0.034431
1
0.018713
false
0
0.002994
0
0.022455
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
1623521771de97b8ec1428ee8e1cf9b7f82faa6f
188
py
Python
src/simplify/__init__.py
kratsg/simplify
9b9245dce6ff433f10f9e471e94d321106fdfaba
[ "BSD-3-Clause" ]
4
2020-11-25T16:02:07.000Z
2021-11-13T13:05:09.000Z
src/simplify/__init__.py
kratsg/simplify
9b9245dce6ff433f10f9e471e94d321106fdfaba
[ "BSD-3-Clause" ]
19
2020-11-25T16:05:17.000Z
2021-11-10T18:12:24.000Z
src/simplify/__init__.py
kratsg/simplify
9b9245dce6ff433f10f9e471e94d321106fdfaba
[ "BSD-3-Clause" ]
2
2020-11-25T16:10:48.000Z
2021-11-08T15:20:59.000Z
from . import model_tools # NOQA from . import plot # NOQA from . import fitter # NOQA from . import configuration # NOQA from . import yields # NOQA from . import simplified # NOQA
26.857143
35
0.712766
25
188
5.32
0.4
0.451128
0.526316
0
0
0
0
0
0
0
0
0
0.223404
188
6
36
31.333333
0.910959
0.154255
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
1646741defe104df90b88616f54259d3160f0c8a
75,967
py
Python
imageme.py
gimmeyoon/imageme
fdf83ecfc3ae28e5de32c315f95bd31f2b15cc3f
[ "MIT" ]
null
null
null
imageme.py
gimmeyoon/imageme
fdf83ecfc3ae28e5de32c315f95bd31f2b15cc3f
[ "MIT" ]
null
null
null
imageme.py
gimmeyoon/imageme
fdf83ecfc3ae28e5de32c315f95bd31f2b15cc3f
[ "MIT" ]
null
null
null
#!/usr/bin/python """ imageMe is a super simple image gallery server. Run imageme.py from the top level of an image directory to generate gallery index HTML and run a SimpleHTTPServer on the localhost. Imported as a module, use imageme.serve_dir(your_path) to do the same for any directory programmatically. When run as entry point, imageme.serve_dir('.') is what's called. """ # Dependencies import base64, io, os, re, sys, threading, http.server, socketserver # Attempt to import PIL - if it doesn't exist we won't be able to make use of # some performance enhancing goodness, but imageMe will still work fine PIL_ENABLED = False try: print('Attempting to import from PIL...') from PIL import Image PIL_ENABLED = True print('Success! Enjoy your supercharged imageMe.') except ImportError: print( 'WARNING: \'PIL\' module not found, so you won\'t get all the ' +\ 'performance you could out of imageMe. Install Pillow (' +\ 'https://github.com/python-pillow/Pillow) to enable support.' ) # Constants / configuration ## Filename of the generated index files INDEX_FILE_NAME = 'imageme.html' ## Regex for matching only image files IMAGE_FILE_REGEX = '^.+\.(png|jpg|jpeg|tif|tiff|gif|bmp)$' ## Images per row of the gallery tables IMAGES_PER_ROW = 3 ## Resampling mode to use when thumbnailing RESAMPLE = None if not PIL_ENABLED else Image.NEAREST ## Width in pixels of thumnbails generated with PIL THUMBNAIL_WIDTH = 800 ## Base64 data for an image notifying user of an unsupported image type UNSUPPORTED_IMAGE_TYPE_DATA = 
'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAMgAyADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD36iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoo
ooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoo
ooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorn/+E68If9DXof8A4MYv/iqAOgopM1n6lrukaKYv7V1axsPNz5f2q5SLfjGcbiM4yPzoA0aKytP8SaFrFw1vpet6bfTqhdo7W7jkYKMDJCknGSOfetUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSc0A0ALRRRQAUUUUAFFFFABRRXg/xL+M/iLwv4z1DQNMtdPWK28vE0qM7ndGr/3gB970oA94orkfhhrmoeJPh3perarMJr2487zJAgTO2Z1HAAA4AFddQAUVieL/ABCnhXwnqOtvF5v2SLcsecbmJCqCewLEV89eF/jv4qh8Q2/9t3MN7pssoSWPyEjaNWONyFQDkdcHOenGc0AfUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSUDpzQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwDQB9/8Aavn/APaa6+F/+3v/ANo19Adq+f8A9prr4X/7e/8A2jQBz/7OP/JQ7/8A7BUn/o2Kvp+vmD9nH/kod/8A9gqT/wBGxV6D+0b/AMk9sP8AsKx/+ipaAPSrrxHoVgxW81rTrdh2mukT+Zptl4n8P6lKIrHXdMupD0SC7jcn8Aa+LvD3hrWPFeoPYaJZtd3SRmVow6rhAQCcsQOrD86n8R+DfEXhJo11zS5rMS/cclXRj6BlJGfbNAH3BRXgv7Pvja6u5LnwrqE7TLFF59m0jZKqCA0Y9uQQO3P4dJ8dvGF54a8LWtjpszwXWpyMhmQ4ZYlA3bT2J3KM+hNAHol/4h0XSn2ajrGn2b/3bi5SM/8AjxFOsdc0jU2xp+qWV2fS3uFk/kTXx34M8A6348urmDSBbqtsqtNLcSbUXdnaOASScHoO1dTL8A/GVrfW0bw2lzbvKiyy2twDsUkAthwp4HoKAPquiha+AaAPv6il718AUAff1VbzUbKwXdeXlvbL6zSqg/UirVfAVAH3DH4z8LSyCOPxLo7yHoq30RP/AKFW2DkA5B96
+Ob34R+PLC0a5n8OTmNBk+VLHK2P91GJP5VneDPG+r+CdXS806ZjCzD7Ras37uZfQjsfRuo/SgD7YqG5u7azj8y6uIoE/vSuFH606CaO5gjnhkWSKRQ6OpyGBGQR+Br4H4oA+3x408KmTZ/wk2jbz/D9viz+W6tuN0kRXRw6MMhlOQR618ey/B3x/FbmdvDspQDOEniZvwUOT+lc74d8R6t4V1dNQ0m7e3nQ4YfwyD+669x9f0oA+5qKoaJq1rr2iWeq2T7re6iEqZ6gHsfccj8K8H+OPxLuzqUvhPR7loYIRi/mjbBkYjPlgjoAMZ9Tx2OQD3K+8TaBpc3k3+uaZaS/3Li7jjb8iRV+1u7a9gWe0uIriFvuyROHU/iK+LPD3gDxT4qtnudG0ea5t1JHmlljQkdQC5AJ+lZsU2r+GNcDRtc6dqdnJ3BR42HYj+h6igD7rpCQoJJAA6k9qwfBnie28YeFbLWrZdhmUrLHnJjkXhl/Pp7EGvmH42MT8XNbBJIAgAB7fuIzQB9RTeMfC9u+yfxJpEbZxte+iU/+hVpWl/Z6hF5tldwXMf8AehkDj8wa+N9D+GfjDxJpcWp6Tor3FnLu2S+fEgbBKnG5geoI/CvY/gL4L1fw5qGu3et6bPZTFIoIfNTG4ZLPg9xwnSgDxLx9LJL8QvEfmOz7dTuVXcc4AlbAHtX154E/5J54a/7BVr/6KWvkDx3/AMlD8S/9hW6/9GtUcHgvxVdW8Vxb+GtZmglQPHJHYSsrqRkEELggjnNAH3FRXlnwD0nUtH8CX1tqmn3djO2pSOsV1C0TFfKiGQGAOMgjPsa8q+MXxLu/Eeu3GiabctHoto5jYRtgXLg8s3qoPQdOM9+AD6QufFnhuyuDBdeINJgmBwY5b2NGB+hatWKWKeJZYZFkjYZV0YEEexFfGemfDDxprOlrqVjoFxJaOu5WZkQsvqqsQSPTAOayPDviHU/Cmtw6lplw8FxE2GXnbIueUYdwfT+tAH3NQeOar2V5BqFhb3tq/mQXESyxOP4lYAg/ka+T/i18Qbnxf4kntLedl0azkKW8SH5ZWHBkOOue3oPqcgH063jHwxHP5D+JNIWbOPLN9EGz9N2a+W/jWQ3xc1xlIIItyCO/+jx1m6N8MvGev6et/pugzy2rjckjukQceq72BYe4rndR0680m/lsdQtZbW7hOJIZkKsvGRkH1BBHsQaAPrT4Jf8AJINC/wC3j/0okrv64D4Jf8kg0L/t4/8ASiSu/oAp6tplprWk3Wm38QltLmMxypnGQffsfftXk/hr9n7SdF1+DUr/AFebUY7eQSRW3kCJSwORvO47hntxnvxwfRfHX/JPPEv/AGCrr/0U1fIHgT/kofhr/sK2v/o1aAPt6qd7qunacM32oWtr3/fzKn8yKy/HY/4t94l/7BV1/wCimr4qsrOfUb63sbWMyXNxKsUUYIy7scKPzIH40Afbdv4v8M3cohtvEWkzSngJHexsxPsA2a2WZUQsxAUDJJPAFfFPiH4feKvCtot3rWjS21szBfNDpIoJ6AlGIH41jwLqWryWmmW5uryQEx21spZ8ZOSEXtnqcUAfaieMfDEk/kp4k0h5c42LfRFvyzW3XxP4g+H/AIq8LWaXms6PNbWzEL5odJFBPQEoSB+Ndr8CfGN1pPi6LQJZmbTtSyqxseI5gCVYemcbT65HpQB9R1n3uu6PprFb/VbG0I6ie4WM/qRWhnvXxnYfCnx1qUYkt/Dd2qnp55WE/k5FAH1xY+JdB1SXytP1vTruQ9Et7qOQ/kCa1K+Gtf8AC2t+FrpLfW9Oms5JASm/BVwOuGBIOMjoa99/Z/8AGV3rOlXvh+/naaTT1WS2dzlvKPBXPopxj2bHQCgD2iqt7qNlpsXm395b2sX9+eVY1/MkVxHxa8ev4G8Mo1kV/tW+JjttwzsAHzSY74yPxI+lfKenaZqviPVBbafa3F/eyksVRS7H1J/xNAH2nB4w8M3Lbbfx
HpErekd9Gx/Rq2QwZQVIIIyCK+KfEfw+8UeErCO81zSmtLaSQRI/nRuC5BIHysccA/lXc/s5f8lCv/8AsFSf+jYqAPp6q19qNjpsHnX97b2kX9+eVUX8zXK/E3xuvgbwo97EEe/uG8m0jbpvI5Y+yjn64HevkrTdJ1rxXq7QWFvc6lfzEyOQSzHnlmY9Oe5NAH2vp/iDRdWcppur2F6w5ItrlJD/AOOk1o18O+IfCWveE544tc0yazMmfLZiGV/XDKSCfxr6B+BfxAuvEmnXGg6rcNNf2KCSGZ2y8sOcfNnqVJAz3DDvzQB7DRRRQB8SePpZJPiF4j3yM+3VLlV3EnAErYFfXngX/knnhn/sFWv/AKKWvkDx3/yUPxL/ANhW6/8ARrVHB4M8VXVvFcW/hrWZoJUDxyR2ErK6kZBBC4II5zQB9xUV5Z8A9J1HRvAt7b6pp93YztqcjrHdQtExXyohkBgDjIIz7GvmrxNosnh3xPqWkS5LWlw0YYjllz8rfiMH8aAPuiisDwRrq+JPBWkatuDPPbL5p/6aL8r/APjwNfM3xu1n+2PidfIjborBEtE+qjLf+PMw/CgD64qreajZWC7ry8t7ZfWaVUH6kV5x8BNDOlfDpLyRNs2pTtcHIwdg+RR9PlJ/4FXynQB9wx+M/C0sgjj8S6O8h6Kt9ET/AOhVtg5AOQfevjm9+EfjywtGuZ/Dk5jQZPlSxytj/dRiT+VZ3gzxvq/gnV0vNOmYwsw+0WrN+7mX0I7H0bqP0oA+2KKjhmjuLeOeGQSRSKHR1OQykZBH4Gvlz4xfEu78R63c6Hp1yY9FtJDGwjOPtLrwWYjqoPQdOM88YAPo658WeG7KcwXXiHSoJgcGOW9jRvyJzWrDNFcRLLDIkkbDKujZB/GvjTTPhj401jS11Kx0C4ktXXejs6IXX1VWYEg9sA5rI8PeItU8J65DqWmXDwXETYZedrrnlGHcH0/rQB9zUVi6rqUtz4JvdT0VjLLJp0lxZMgzvYxlkx+OK+II5ZIpUlid0kQhlZSQVI5yDQB980UfjRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwfrGlXOiaxeaZeIUuLWVonHuD1Hsev40AfeAYMoIOQRkV4B+0yQT4XAPOLo/8AomtfwL8cvDo8MWln4juZbO/tYlhaQwvKswUYDZUE5IHOQOc15D8UfHY8e+J1u7aKSGwto/Jtkk+8RnJZh2JPb0AoA6j9nEH/AIWFfnHH9lSf+jYq7/8AaO/5J5p//YVj/wDRU1Y37OPh6WG11bxBNGVjn22tuxGNwUkuR7Z2jPqD6Vs/tHf8k8sP+wrH/wCipaAPP/2cf+Sh3/8A2CpP/RsVeufHGFJPhLq7sATE8Dr7HzkX+RNeR/s4/wDJQ7//ALBUn/o2KvYPjb/ySHXP+3f/ANHx0AeA/BFivxc0UA8MJwff9xIf6V7X8cPBd74q8K211pkLT32mSM4hUZaSNgN4X1Pyqcd8HvivE/gl/wAle0L/ALeP/SeSvprxX450PwWbE63PJCl47LG6Rl9u0DJIHOOR0B60AfGulazqWhXq3mlX09ncKMCSFypI9D6j2PFex+E/2h7+G4itvFFnFcW5IVru2XZIv+0V6N+GK9P1bXPhj4rsturar4eu4yvBnuI1kX/dJIZT9MV8k6nFaQareQ6fM09lHO628rjBeMMQrH3IwfxoA+8hXwDX3N4Sjmh8GaFFcA+emn26yZ67hGoP618OywyQTPDKpSRGKsrDlSOCDQB9918AV9Z6d8d/A11YRzXeoTWU5Ub7eS2kcqe43IpB+ua+TMUAff1fANff1fANAH38a+NPivp0GlfFDXrW3VVj88ShVGADIiuR+bGvoM/HbwF9jM/9pXBkxn7P9kk3n2zjb+tfLniHWrjxH4hv9YuVCy3kzSlAchQeij6DA/CgD60+D1xJdfCfQJJCSwikjGfRZXUfoBXxxX3F
4M0VvDngzSNIfAltrZFlwcjeeWx/wImvh2gD79PWvlX4/adBY/Eoywqqm9so7iTaMfPlkz+SCvYbb47eA57TzpNRubaTGfIltJC+fTKgr+tfOPjvxZJ418W3WstEYYn2xwRMclI1GAD7nkn3JoA+gP2ebmSf4byxuSVg1CWNM9htRv5sa+Yb65kvL65upiTLNK0jk9SScnP519d/B7QZfD/w102C4jKXFzuupFIwRvOVz77Qua+afiT4al8LeO9TsGTbA8hntjjhonJK4+nK/VTQB9jaZZQ6bpdpY26hYLeFIowOyqAB/KvAf2lLKCPVfD98qjzp4ZonI6lUKEf+htW/4C+Ofh9fDNrZeJLiWzv7WIRGbyXkSYKMBvlBIJA5BHWvIvih46HjvxSL23jkisLaPybZJPvYySWI7Ent6AUAeofs03MjWPiO1JPlxywSKPdhID+iLXm/xt/5K9rv/bv/AOiI69r+AnhyXRfAjX9zGUn1ObzlUjBEQGEz9fmYezCvFPjb/wAle13/ALd//REdAH0B8Ev+SQ6F/wBvH/o+Su/rgPgl/wAkh0L/ALeP/R8ld/QB8Q+O/wDkofiX/sK3X/o1q+v/AAJ/yTzw1/2CrX/0UtfJvxO0+TT/AImeIYpFwXvXnA9pDvB/Jq9n+Hnxo8LWXgzTtN128ksryxhW3/1DusioMKQUB7AZzjmgD2qvgIV9t+EfGuj+N7O6utHaZorabyWMsewscA5Az05746GvjTWtHutB1u80q9QpcWsrRv74PBHsRgj2NAHp/wDw0Z4uH/MN0Pn/AKYy/wDxyvNPEeu3HibxBd6zdQ28M90waRLdSEBAAyASTzjJ56k19H+Hfj/4Wv7CP+2jNpl4BiUeS0sZPqpQE49iPz61Pqfx+8F2cZNpJe379hDblB+JfbxQBs/BuZpvhNoLvkkRypz6LK4H6AV8eCvv32r4T13Rbvw9rt7pN6pWe0laNuCA2OjD2IwR9aAPusEYwMfSvkL42/8AJXtd/wC3f/0RHXsHhb4+eGr3SYR4hmk0/UUULMRA8kcjf3l2AkZ64I46V4d8T9csPEnxE1XV9LmM1lceV5cjIULbYkU8HB6qaAPpD4Jf8kg0L/t4/wDSiSu/rgPgn/ySHQx/18f+lEld/QBgeOv+SeeJf+wVdf8Aopq+QPAn/JQ/DX/YVtf/AEatfX/jr/knniX/ALBV1/6KavkDwJ/yUPw1/wBhW1/9GrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yUPw1/2FbX/0atfX/jv/AJJ54l/7BV1/6KavkDwJ/wAlD8Nf9hW1/wDRq0AfXXxBgSf4c+JEkG5Rptw4B9VjYj9QK+R/ATlPiH4aI76pbD85VH9a+vfHf/JPPEv/AGCrr/0U1fIHgT/koXhr/sK2v/o1aAPrr4gQJP8ADrxIkiggaZcOB7rGWH6gV8jeA2K/EPw0Qcf8TS2H5yrX1946/wCSe+Jf+wVdf+imr5A8Cf8AJQvDX/YVtf8A0atAH2fresWXh/RrnVdRlEVpbJvkbGT7ADuScAD1NfNGs/tAeL76dzpotNMgz8ipEJXA92fIJ/AV6f8AtCxTSfDeJos7I9QiaXH93a45/wCBFa8S+FHivS/BvjUalq8TvbNbvCJETc0LEqd+PoCOOcMfpQBR8SfEXxP4u0mLT9cvkuoI5hMh+zxowYBl6qo4wxrtP2cSf+FhagM8HSpD/wCRYq1/jT8RfC/i3wdZ6foepG6uY79JnX7PJGAgjkXOWUd2FZH7OI/4uHqH/YKk/wDRsVAEn7SDSf8ACdaYpz5Y01Sv182TP9K7L9m6O2HhHVpV2/amv9snrsEa7fwyX/WtP44+BrrxV4et9R0yIzahppZvJUfNLE2NwHqRgED6454r5r0PxDq/hq/+26PfTWdxjaWjPDD0IPBHQ4IoA+iv2jv+Se6f/wBhWP8A9FS15/8As4/8lCv/APsFSf8Ao2KuC8R+N/EviyKN
Nb1aa7iRt6xYVEDYI3bVAGcE84zzXe/s4/8AJQr/AP7BUn/o2GgCx+0hcyN400q1JPlx6cJFHu0jg/ogrkPBHxP1nwFY3VrpVjpsouZRJJJcxuz8DAGVdeByenc16h+0b4dmns9L8QwR7o7fdbXBH8IYgoT7Z3D6ketcJ8J/ikvgOW5sNRgkn0q6cSExYLwyYxuAOMggAEZ7DHoQCv4s+Mmv+MfD82i6lYaSkEjK2+GGRXUqwOQTIQDxjp0Jo+BszRfFnSkUnEsc6Ng9vKc/zAr3V/jj4ASLcNakdv7i2c2f1UD9a3PBHjfTvHmm3WoabDPDDBcm3KzgB2wqtnAJ4+b17UAdRRRRQB8Q+O/+Sh+Jf+wrdf8Ao1q+v/Av/JPPDP8A2CrX/wBFLXyb8TrCTTviZ4hhlUqXvZJx7iQ7wfyavZ/h58aPC1n4M03TNcu5LG8sYFtzmB3WRUG1SCgP8IGc45zQB7Sa+e/2jPDJjutN8TQR/LKPslwQOjDLIT9RuH/ARXsfhHxro/jezurvR2maG2m8ljKmwk4ByBnpz3x0NXfE2hW/iXw3f6NdYEV3CU3YzsbqrfgQD+FAHiv7OfiRI4dY8P3MoVY8X0O44wOFk/8AZD+deESyXGo37yuWmubmUseOXdj/ADJNJDcXFlM7RSSQybXibHB2sCrKfqCQfrXpHwJ8N/238QYr6VN1tpSG4YkceYeIx9ckt/wCgD6osrSGwsbezt0CQW8axRr6KoAA/IV8EV9/V8A0Affxr40+K+nQaV8UNetbdVWPzxKFUYAMiK5H5sa+gz8dvAX2Mz/2lcGTGfs/2STefbONv618ueIdauPEfiG/1i5ULLeTNKUByFB6KPoMD8KAPrP4PXEl18J9AkkJLCKSMZ9FldR+gFfHY6E+9fb/AIM0VvDng3SNIfAltrZVlxyN5GWx/wACJr4u1rSLvQdavNKvYylzaStG4PfHQj2IwQe4IoA9Q/4aN8Xf9A7Q/wDvxL/8crzbxHr1z4n8QXmtXcNvBcXbB5EtlKoCFAyASTzjJ56k19GeHvj/AOFr6xj/ALaM2mXgAEg8lpYye+0rk4+o/PrVjUvj94Ls0P2SW9v37LDblB+JfbQBsfBuZp/hLoLuSSElTn0WZwP0ArqYdA0W31E6jDpFhFfNyblLZBIf+BAZrRxXgP8Aw0x/1KP/AJUv/tVAHv2aWgDFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFch44+HOh+O7VBqEbQ3kS7YbuHAkQf3T2Zc9j+GK6+igD5ouv2cPEqSkWmsaTLH2aUyRnH0CsP1rd8P/s4RxXCTeIdZE0anLW9khXd9XbnH0X8RXvVFAEcEEVtBHBBGkcMahERBhVUDAAHYVynxH8Df8LA8PW+k/wBo/YPJu1ufN8jzc4R1243L/fznPauvooA8w+HHwf8A+Ff+IbjVv7d+3+baNbeV9k8rGXRt2d7f3MYx3rsPG/hn/hMfCF9oP2v7H9q8v9/5fmbdsiv93Iznbjr3rfooA8g8E/Az/hDvF9jr/wDwkf2z7L5n7j7F5e7dGyfe8w4xuz07Vs/Ez4Vj4gz2l0mrvZT2sRjRGiEiHJznqCD789OlejUUAfMj/s5eKw/yapopX1MkoP5COuv8Kfs86dp15FeeIdQGomMhhaxR7IiR/eJOWHtx75r2zFFABXmvj/4N6T41u21O3uG03VWGHmRNyS46b1yOfcEe+a9KooA+ZG/Zx8WB8DVNFKepllB/Ly61tP8A2a7kuDqXiOFFHVba2Lk/8CYjH5V9C0UAFfANff1fIP8AwpP4h/8AQv8A/k7b/wDxygDqLv8AZv8AEaTlbTWNKlhzw0xkjbH0CsP1rvfAfwN0vwtfQ6pqt0NT1CIh4V8vbFE3rg5LEdicY9M4NesZzS0AAr4Br7+zjtXyD/wpT4g/9C//AOTtv/8AF0AdZqH7N2uxzkabrenTw54NyrxN+Shv510/
g79n2w0u9jvvEd6motGQy2kSEQ5H98nlh7YA9c9K9r60UAJisTxT4S0jxhpLadrFt5sed0bocSRN/eVux/Q9xW5RQB82X37N2vpOw07WtMnhzw1wJIm/IK3866bwn+zzYafdR3fiO/GoFCGFrApWIkf3ieWHtgV7bRQADivIPG3wM/4THxffa9/wkf2P7V5f7j7D5m3bGqfe8wZztz0716/RQBz/AIJ8M/8ACHeELHQftn2z7L5n7/yvL3bpGf7uTjG7HXtXQUUUAcV8QPhnpPj+1ia5ke11CBSsN3EoJA/usv8AEuecZBz0Iyc+NP8As4+KRKRHqujtHnhmklDflsP86+mqKAPPvhd8Npfh5bagJtVF7JfGMuqRbEj2bumSc/ePPHatjxn8P9C8c2ax6pCy3EQIhuoTtkj9s9CPYg/nXU0UAfNV3+zd4ijmIsta0uaLPDTeZEfyCt/Ormm/s2agzqdT8QWsS91toWkJ/FiuPyr6JooAK5bxp4A0Px1ZJDqkLLPECILqEgSR+2ehHsf0PNdTRQB81Xn7N/iFJiLHWtLmizw03mRHH0Ct/OprL9m3WJGH27X7GBe5gieX9Dtr6PxRQBieEPDUHhDwtZaFbzyXEVqHxLIAGbc7Oc492P4Vt0UUAZ+u6Z/bXh7U9J87yft1pLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1b/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBn69pn9t+HtT0nzvJ+3Wktt5u3ds3oV3YyM4znGRXkGhfs8/2J4h0zVv+Eo877DdxXPlf2ft37HDbc+YcZxjODXt9FAGfrumf234d1PSfO8n7day23m7d2zehXdjIzjOcZFeQaF+zz/YniHTNV/4SjzvsN3Fc+V/Z+3fscNtz5hxnGM4Ne30UAZ+u6Z/bXh7U9K87yfttpLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1X/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBBe2dtqNlNZXkKTW06GOWNxkMp4INeAaz+zdeC4ZtE16B4SfljvUKso9Cy5z9cCvoaigD5li/Zx8Ulh52q6Oi+qSSsf1QV6b8NvhCngDWJ9VfWWvZ5rY25RYPLVQWVs53En7nt1r02igDI8SeIdP8LaHNq2qO6WkTKrFF3HLMFGB365+grnZfFHwz8RKJr3UvDl3kcfbvK3flJz+lL8U/BeoeOvC8Wl6dewW0kdws588Ha+FYBcjJHLZ6HpXgFx8DPH0EhWPSYbhRxviu4gD/AN9MD+lAG78Z/GPhnUNNsfDfhWO0NtDcfabiS0iCRbgpVVXAAbhmJPTp15xe/Zt0uR9c1rVtp8qK2W2DY6s7BsD6bP1FZGjfs++Lb6Zf7Tls9Mgz8xaTzXx7BeD+LCvpDw/oGn+GdFt9J0yARWsC4A6lm7sx7knnP8qANCWJJomilRXjcFWVhkMDwQR3FeF+I/2cree4efw7q/2ZGORbXalwv0cc4+oJ9693ooA+Zof2cPFDOPP1bR0T1jeVz+RQfzr2H4bfDmP4eWF5Aupy30l2yM5MYjRSoP3Rknv3PYV3FFABRRRQBxXxA+GekePraNrl2tdRgUrDdxqCQOu1h/EueccY5weTnxp/2cfFXm4j1XR2jzwzSSg4+mw/zr6aooA8++F/w3l+HltfibVBeyXvll1SLYibN3TJOc7uvHQVtL8RPBsiO3/CTaYuwkMr3Ko2R/snB/Sunr5JufgZ4+t5THFpcNyvaSK7jA/8eYH9KAOZ8ca3aeJfGmq6vY232e3upt0aYwTwBuI9WxuPuTX1D8IvCjeE/ANrDPGUvrsm6uQRyrMBtU/RQvHrmvPvAXwCntNSh1PxXLA6QsHSwhO8OR08xsYx7DOfXtXvlABXwDX39XyD/wAKT+If/Qv/APk7b/8AxygDqLv9m/xGk5W01jSpYc8NMZI2x9ArD9a73wH8DdL8LX0OqardDU9QiIeFfL2x
RN64OSxHYnGPTODXrGc0tACYrlvGnw+0LxzZLFqkLLcRAiG6hOJI/bPcexzXVUUAfNV3+zf4iSU/YtZ0qaPs03mRn8grfzq5p37NmoPIp1TxDbRJnkW0LSE/i23H5GvonFFADXdY0Z3IVVGSSeAK+CrW1lvbuG1t1LzTyLHGo/iZiAB+Zr7j8R6Odf8ADt/pIu5LQXkJhaaNQWVTwRg+oyPoa8v+H/wSk8I+MxrF/qNvfQ28bfZQkZRhIeNzA5AwM45PJ9qAPZOlFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVBeXkGn2Vxe3UgitreNpZZG6KijJP5A0AT0V53B8avBt5rVlpdjdXd5PeXEdvG0duyqGdgoJ37eMn/61eiUAFFFFABRRRQAUUUUAFFFFABTfrinV8a/Fa7v7n4ma6NQZy8VyYolY8LEPuAD0K4P4570AfZI/KlrzX4E3V/dfDK3N8zssc8kdsznJMQxjn0DbwPpXpVABSYA6ClooAKKKKACiiigAorJ8QeJdI8Laet/rV6tpbNII1cqzZYgnGFBJ4B7dqwfCvxP8O+Mtdm0nRmupZYrdrhpJIdibQyrgZOc5YdqAO0ooooAKKKKACkOQcUtfIfxvlkb4ta1GzsUQQBVJ4X9xGeB2oA+u+aM8cHmuB+CX/JIdC/7eP/SiSvIv2jpZB4+sIhI3l/2XG2zPGfNl5x60AfTYOR0xQTXkP7OX/JPL/wD7Csn/AKKirlP2kbq/GtaNalnGnfZmkQA4VpdxDfUhdv50AfRINLXzr+zddX51jWrQM5077OsjDPyrLuwuPQld312j0FfRVABRRRQAUUUUAFFFFABRRRQAUUV5f4z+Nmk+D/EV1oj6Xe3V3a7PMYMqRncgcYOSejDsO9AHqFFeBN+0wobC+EmK+p1HB/8ARVbWk/tE+G7yVY9T0++08k8yDEyL9cYb8lNAHsdITUVpd21/aRXVpPHPbyrujliYMrD1BHWvnv8AaRur8a1o1qWcad9maRADhWl3EN9SF2/nQB9Eg0Hivnb9m66vzrGtWgZzp32dZGGflWXdhcehK7vrtHoKvftMSyJH4ZjV2VH+1FlBwGx5OMigD3vmjP514D+zL/zNP/bp/wC1q6P9ouWSL4e2PluybtUjVtpxkeVLwfagD1sc8EH60tfMP7OP/JQtQ/7BUn/o2Kvp6gAooooAKKKKACiiigAooooAKKKKADFFUdY1iw0HS59T1S5W2soMeZKwJC5IUdOepA/GuR0T4u+FfEXia00LSpru4ubovsk8gpGNqMxzuweinse34AHeUYFFFAB0ooooAKKKKACiiigAowKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKwPHX/JPPE3/YKuv/RTVv1geOv+SeeJv+wVdf8AopqAPjXw1qMOj+KdI1S4V3gs72G4kWMZYqjhiBkjnA9a+iv+GjfB/wD0Ddc/78Rf/Ha+cdB0wa14h0zSvO8k3t3Fb+bt3bN7hd2MjOM5xkV7d/wzP/1N3/lN/wDttAHu17eQadYXF9dSCK2t4mmlcg/KijLHjngAmvI9U/aM8OWzsmnaXqF7g4DvthVvpyT+YFejeO/+SeeJf+wVdf8Aopq+MvDumx6z4m0nS5XaOO9vIbd3TqodwpIz35oA9zg/aWtmkxceF5Y09Y70OfyKCvUte8b2HhzwfbeJruyv5LKZYmMcKKZIxIMjcCwHUgHBPJrz+6/Zw8ONCRZ6xqsUuPvS+XIPyCr/ADr1PX9Gh17w3qGjzALHd27wg4+4SOCPocH8KAOM8L/Gnw14t8Q22i2VrqcFzc7vLe5ijVCVUsRkOTnAPavRq+DtL1GfSdXs9Stjie0mSePP95WyP5V9ueJtYTQPC2p6uSv+i2zypnozAfKPxOB+NAHm7/tFeEFdl+wa22CRuEEWD9P3ter2
dyL2xt7oRSRCaNZBHKAGTIzhgCRkZ55NfDOg6TLruv2GlQZ8y7uEhBAzjcQCfoBk/hX3bQAV5T46+J/gbSvEU+heIvD9xqVxZFTuazhmQbkVxt3uD0YdhzXq1fIPxt/5K9rv/bv/AOiI6APqXwt4hsvFnhq01vT4porW53+Wk6hXG12Q5AJHVT3rlPF3xl8N+D9Yn0i6hv7m+gC+YkES7V3KGGWZh2YdM1L8Ev8AkkOhf9vH/pRJXgHxs/5K9rv/AGw/9ER0AeiTftLQKx8nwrI69i98F/lGa3fDv7QPhvVrtLfU7S50p3IAlkYSRA/7TDBH1xj1IrlPh58FvD/i3wNp2uX9/qcdxdeYGSCRFQbZGQdUJ6KO9eXeOvCU3grxXc6NNN56IFkhm27fMRuQcdu4+ooA+2a5nxh460LwRZpcavcsJJM+VbRDdLLj0HAA9yQKwfglrUus/DSy89zJLZSPaFjySFwVH4Kyj8K+cfiXeXF98SvEMtyxLpfSQrk9ERiqj/vkCgD1iT9paES4j8KyNH/ea/AP5CMj9a7fwL8XdH8c6qdMtrC+tbxYjKRIFZNowD8wOe47d64/4cfDv4a+JfCdjK4XUdTaFWuka8dJIpMfMNisuADwCRyO5r0Dwb8NND8D6ne3ukNdE3UaxlJ3DiMA5wpwDzx1z0oA5P8AaO/5J7p//YVj/wDRUteN/CbxrpvgTxTdapqkF3NBLZPbqtqiswYujZO5lGMIe/pXsn7R3/JPdP8A+wrH/wCipa8Q+HHgb/hYHiG40r+0fsHk2jXPm+R5ucOi7cbl/v5zntQB71pPx78LazrNjpdtYayk95cR28bSQxBQzsFBJEhOMnng16B4i1608MaBd6zfJM1taqGdYVDOcsBwMjufWvJNC/Z6/sTxDpmq/wDCUed9iu4rjyv7P279jhsZ8w4zjGcGvW/EekL4g8N6lpDuIxeW7wiQru2EjAbHfBwce1AHjt1+0rZo5Fr4ZnlXsZbsJ/JWrS0L9ofw9qFykGq6fdaZvOPNDiaNfqQAw/AGm2/7OPhlYR9o1fV5Hx1jaNB+RQ/zrxP4heDJfAvil9Jef7RE0SzwTbdpeMkgZHYgqR+FAH2kCCMg8H0r5B+Nv/JXtd/7d/8A0RHXuPwG1WXU/hlBFKxY2NzJaqScnaMOB+AfH0FeHfG3/kr2u/8Abv8A+iI6APf/AIJf8kh0L/t4/wDSiSvIP2jv+Sh2H/YKj/8ARstev/BL/kkOhf8Abx/6USV5B+0d/wAlDsP+wVH/AOjZaAO//Zx/5J5f/wDYVk/9FRV1HxF8c+HvB9tZW3iHTbi/g1DzAsUcEcqfJtzuDsB/GMda5f8AZx/5J5f/APYVk/8ARUVc/wDtM9PC/wD29/8AtGgD0j4deOfD3jG2vLbw9ptxYQaf5YaKSCOJBv3Y2hGI/hOenarnjT4gaL4DgtZNX+0s91v8mO3j3M2zbu6kAfeHUjrXlf7MvXxT/wBun/taj9pr/mV/+3v/ANo0AWbn9pWyR8Wvhm4lX1lu1jP5BW/nU+mftI6RPMq6noN3aITgvBMs2Pcghf0rz74Q/DnS/iAdY/tO6vIFsfJ2C1ZVLb9+c7lP9wfnTPit8L18ANZXVjdzXWnXTNGDMo3xuOcEjAORkjgdDQB9V2V7bajZw3lnPHcW0yB45Y23KwPcGpzjBzjGO9fPv7N2tS/atZ0N5CYTGt3En90g7XI+uU/KtD9onxRPZ2On+G7aQoLwG4utpwWjU4RfoW3H/gIoA1/EH7QPhnSrh7fTLa51Z16yR4jiJ9mPJ+u3HuawoP2lrdmH2jwtKi9yl6GP6oP515z8M/hpcfEG+uHe5NpptptE0yruZmPRVB4zjPPbj1ro/ix8J9F8C+GLXVNMvdQmllvFtmS6dGGCjtkbVHdBQB7J4K+Kfh/x1fS2OmR3sN3FCZmiuIgPkDAEggkdWFdxXzB+zj/yUO//AOwVJ/6Nir6foAK+Qfjb/wAl
e1z/ALd//REdfX1fIPxs/wCSv65/27/+iI6AOj+E/wAKND8d+FrrVdTu9QhlivXt1S1dFUqERsncrc5Y1ifFD4WSeADa3ltem8025cxqzqFeNwMhWxwcgEgjHQ5Hr2HwW+Inhbwl4NvLDXNU+y3UmoPMsf2eV8oY4wDlVI6qfyrA+MPxSsfG8NnpWjwzCxtpfOeaZdplfBAwOwAZuvJz0GOQCT9n3xBPYeOX0UyE2uowv+7zwJEG4N/3yGH417V8RfHPh7wfbWVt4h024v7fUPMCxRwRyp8m3O5XYD+MY614Z8AdHmvviPHqCo3kadbySO+OAzqUUfU7mP8AwE11f7TX/Mr/APb3/wC0aAPSPh1458PeMba8tvD2m3FhBp/lhopII4kG/djaEYj+E56dq83/AGmenhb/ALe//aNH7MvXxT/26f8Ataj9pr/mV/8At7/9o0AH7Mv/ADNP/bp/7WroP2jv+Se6f/2FY/8A0VLXP/sy/wDM0/8Abp/7WroP2jv+Se6f/wBhWP8A9FS0Aef/ALOP/JQtQ/7BUn/o2Kvp48CvmH9nH/koWof9gqT/ANGxV7v8RPEx8I+BtS1aMj7SqCO3/wCujHap/DO76CgDL8ZfFrw14Mna0uJZLzUVxutbUAlPTcScL9OvtXB2/wC0raNOFufDE0cOeXivA7Y/3SgH614l4X8O3vizxHaaNY48+4bBdukagZZj7AD+nevb9e/Z0sE0WWTQ9UvX1GNCyx3WwpKR/CMKNufU5oA9f8O+JtH8V6aL/Rr1LmDO1gMhoz6MDyDXP+Nvido3gK+trXVrHUpTcxmSOS1jRkODgglnBz07dxXyt4P8TXXhHxRZaxauw8lwJkU/62I/eQ/UfkcHtX0X8f8AQ/7T+Hv9oRpum0y4WUnHPlt8jD8yp/4DQB1ngjx5pXjzT7q80qK6iW3l8p47lVVskAg4VmGDkjr2NS+NPGmmeBdHi1PVEuJIpZhAqWyqzliCc4ZlGMKe/pXg37O2sfY/G95pbvhL+0O0eskZ3D/x0vVj9o3W/tXinTdGR8pY25lkAPR5D0P/AAFVP40Ae2eCPHmk+PLC5u9KiuoltpBE8dyqq2cZBG1mGPx7Gjxt480nwFp9tearFdTLcy+Ukdqis2cZJIZlGPx7ivCv2d9Z+xeN7vS3fCahanaPWSM7h/46Xo/aJ1j7Z44s9MR8pYWg3D+7JIdx/wDHQlAHs/gn4oaN49vrq10qx1KI20Qkkkuo0VeTgAFXY5PPbsea7ccivK/gFoX9l/DwX8iYm1Odpskc7F+RR+jH/gVeqUAcB8bf+SQa7/27/wDpRHXzT8O/EVn4S8dabrl/HPJbW3m70gALndE6DAJA6sO9fS3xt/5JBrv/AG7/APpRHXzD4I8NDxh4vsdBF39k+1eZ+/8AL8zbtjZ/u5Gc7cdR1oA9+/4aN8H/APQN1z/vxF/8dr1+vAP+GZ/+pu/8pv8A9truvjP4sm8K+BZBZSmO+1CQW0TqcMgIJdh+Axn1YGgCHxd8bPDPhe7ksYfN1S9jJWSO2I2RnuGc8Z9hn04rlrH9pPTpbhV1Dw3c28JPLwXKykfgVT+deP8AgHwVd+O/EaaZbSiCFEMtxcFc+Wg44HckkAD/AAr03xx8ArbSPDtzqnh6/vLiW1jMsttdbWLoBklSoHIGTgjnFAHvOkavp+u6bFqGl3cV1aS/ckjOR9D3B9jzV6vjv4UeLJvCvjqwYyEWV7IttdJngqxwGP8Aukhs9eo719iYxxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjrn4e+JR/wBQq6/9FNW/VbULOPUdNurGb/V3MLwv9GBB/nQB8VeBePiD4aJOANVtcn/tqtfbxr4JkiuLC7eKaOSC5gk2srAqyMD0x1BBr1F/2hfGbW3kiDSUfGPOW3Yt9eXK/pQB9C+O/wDknniX/sFXX/op
q+QfAn/JQvDX/YVtf/Rq19Y+JL9dU+Eer6gn3brQppxj/agLf1r5O8Cf8lC8Nf8AYVtf/Rq0Afb1FFFAHyD8Y/C58NfEK9MabbTUP9MgwOBuJ3j8GDcehFbHiX4grq3wO0LQxNm+8829yuefLgAK5+oaPn1U17R8XvCX/CWeBbkQR77+xzdW3HLYHzJ+K549QtfIHtQB7R+zr4dN54nvtelTMVhF5URI/wCWsnBI+ihv++hX0rXKfDfwz/wiXgXTdNkQLclPOueOfNfkg/Thf+AiuroAK+Qfjb/yV7Xf+3f/ANER19fV8g/G0f8AF3tc/wC3f/0RHQB7/wDBL/kkOhf9vH/pRJXz/wDG3/kr2u/9u/8A6Ijr6A+CX/JIdC/7eP8A0fJXgHxsGfi9rn/bv/6IjoA9/wDgn/ySHQv+3j/0okrx/wDaNGPiHYf9gqP/ANGy17B8E/8AkkOhf9vH/o+SvH/2jv8AkoVh/wBgqP8A9Gy0Aegfs4/8k91D/sKyf+ioqb8Vfg3L4s1Btd0GWKLUmULcQSnak+BgMG7NgAc8HA6d3fs48fD2/wD+wrJ/6KirktS+PuvaT4t1a2jtbG+02G8kjgDgo4RWIHzA45x3FAHkGseH9X8PXP2fV9NubKU/dE0ZUN7qejD3Ga9O+C/xH1TT/E9l4d1C8ludMvG8iJZWLGBz93aT2JwMdOc9uZvE/wAf5/EPhu/0iLw5FbfbIWheWS6Mu1WGCQuxefQ544rk/hD4du9f+IulvDGxt7CZbueUDhAh3KCfUsAB+PpQB7J+0d/yT2w/7Csf/oqWvP8A9nIgfEO/BPXSpAP+/sVey/F3w5P4m+HV9a2kZkurcrdQxqMlinUD1O0tgdzivk/w/r2o+GNat9W0qfybuEnaduQQRggg9QR/nNAH3VmvHPjr8QL/AMN2lnoWj3D215eoZZriM4eOLOAFPYkg89Rj3rg4f2gPFl3qVks66da2onj8/wAiA5ZNw3DLs2MjPTmtz9o7w9dNeaX4ijjZrYQ/ZJmHSMhiy5+u5vyoA8z8GfDfxB47WeXSkgS3hbY9xcyFU3YztGASTjngdx61T8Z+DdT8DaxFpeqSW0k8kAnVrZyy7SzKOSAeqmr3gv4l+IPAkdxBpTW8ltO294LmMsm7GNwwQQcY79hVLxp401Hx1rMWqapFbRzxW626rbIyrtDM3QsTnLHvQB73+zj/AMk91D/sKyf+ioq8g+Nv/JXtd/7d/wD0RHXr/wCzjx8PdQ/7Csn/AKKirzb4/aJcWHxDfVGjb7NqUCMkmONyKEZfqAqn/gQoA9p+CR/4tFoY7/6R/wCj5K8g/aO/5KHYf9gqP/0bLVP4O+OfEWm+ItN8MWcsUmm3l0N8U0ZbyweXKEEEHAJ9M9q6L9pDQ7garpGupGWtmtzaO4HCMrFlB+u5sfQ0AdT+zkcfDzUP+wrJ/wCioq5/9pnp4W/7e/8A2jXmnw+8ceIvCmqLZ6JJE0d9Mkb286b0ZicA8EEHnsa9L/aYHHhfp/y9/wDtGgA/Zl6+Kf8At0/9rUftNf8AMr/9vf8A7Ro/Zm4/4Sj/ALdP/a1L+01z/wAIv/29/wDtGgBP2Zf+Zp/7dP8A2tXQftHD/i3unn/qKx/+ipa5/wDZm4/4Sn/t0/8Aa1dB+0dz8PbD/sKx/wDoqWgDgP2cf+Shah/2CpP/AEbFVn9pGzkTxhpF6QfLmsPJUnplJGJ/9GCq37OQ/wCLg6h0/wCQVJ/6Nir3rxv4NsfHHhyXSrxjE+RJBcKMtDIM4IHcc4I7g9utAHlH7O/inTLXStQ8PXVzFBdvdfaYRIwXzQyqpC56kbBx159jWp+0ZfWjeCdPshdQm7/tJJfIEg37BFIC23rjLDn3FeN6t8K/G2kXLxS+Hry4APElmnnqw9RsyfwIB9qzNV8GeI9C0dNU1bSbiys5JlhU3GEZnIY42k7uinnFAHf/ALOP/JQ7/wD7BUn/AKNir6fr5h/ZxB/4
WFqBxwNKkB/7+xV9PUAFfIPxt/5K9rn/AG7/APoiOvr6vkH42/8AJXtc/wC3f/0RHQAzwV8J9d8eaNNqml3enQwRXBt2F1I6sWCqxICoRjDjv613ulfs2XBlVtZ1+NYwfmjs4Sxb6M2Mf98mul/Zy/5J5qH/AGFZP/RUVev0AZ2h6FpvhzSotN0m0S1tY+Qi9z3JPUk+prxH9pnp4X/7e/8A2jXv9eA/tMjI8L/9vf8A7RoAT9mXr4p/7dP/AGtR+01/zK//AG9/+0aP2ZuP+Eo/7dP/AGtWv+0ZolxfeGtL1aCNnTT53WbA+6sgUbj7ZRR/wIUAZH7Mpx/wlHv9k/8Aa1dB+0d/yT2w/wCwrH/6KlrwHwl4y1nwVqbX2jzqjyJsljkXcki9cMPr6YPvX0n8ctDuNZ+Gs7WsZkksbhLsooySqhlY/grE/QUAeVfs4/8AJQr/AP7BUn/o2KvRP2iI3k+HNsyAlY9SiZ/YeXIP5kV84+HvEOp+F9Yh1XSZ/JuosgHAIYHqpB6g19j3+j/8Jh4COm6sqRy6hZJ52xeI5SobKg/3XwQD6CgD58/Z5nhi+JEySEBptPlSPPdt6Nx/wFWr6lyP618JMmqeG9b2ss9jqdlKDg5V43HSu41343+MNe0aTTJXs7SKVCkslpEVkkUjkEljjPtigDzevvXULKHUtOurG4XdBcxNFIvqrAg/oa+Rfhb4DuvGnimBpYG/si1kEl5KR8rAc+WPUt0x2GTX2FQB8NeGNWk8OeLNM1TDA2d0kjr3Kg/Mv4jIrUmab4hfFBiu7/ibaiAvqkRbA/75T+VbPxs8Of2B8RryaNNttqQ+2R46bmJDj/voE/iK6f8AZz8PfavEOo6/KmY7KEQQkj/lo/Uj6KCP+B0AedQmfwD8S137t2k6lhvV0V+fwZf51n+JtWk8SeLNS1UBmN7dPJGvUhSflX8BgV6l+0X4f+x+JtP12JMR30JilI/56R4wT9VKj/gNc98EvDZ1/wCIlrcSR7rbTF+1yZHG4HCD67iD/wABNAH1bY2cOnadbWNsuyC2iWGNfRVGAPyFWKKKAOA+NnPwh13/ALYf+j46+f8A4J8fF3QiTgfvx/5Akr6h8a6G3iTwXq+jx4825t2EWTgeYPmXJ7DcBXxVp99daTqNvfWcjQ3dtIJI5B1VgcjigD7zzivBf2mInNt4alGfLV7lW+pEZH8jXJXf7QnjS4tzHFHpdq3TzYbdi3/j7MP0r3z4ieEV8beDrrSlKpdAia1kbosq9M+gIJU/71AHkv7NM8K3viSBiPOeO3dB32qZA36sv6V79eXENrY3FxOQIYo2eQnptAyf0r4c0rVdU8L63HfWEstnf2rkdMFT0KsD+IINdX4r+L/inxhpbabePa2tm/8ArY7SMp5uOQGLMTj2zQBwtvFJNdRRRZMjuFXHXJPFffNfLnwQ8BXOt+JYPEN5AV0vT38yNmHE0w+6F9Qp5PuAK+o6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigApCM0tFAGNrvhPQfE0aprOlW15tGFeRfnUezD5h+BrAtfg94BtJRJH4dhZgc/vZpJB+TMRXcUUAQNZWr2LWLW0LWjRmJoCgMZQjBUr0xjjHTFZUHgvwta3EVxb+GtGhnicPHJHYRKyMDkEELkEHnNblFABXN+PPE7eD/Bt9rccSTSwbBHE5IDszhccfUn8K6SsbxN4X0rxfpH9maxA8ttvEihJGQqwBAOQfQnrxQB4D4j/AGhtU1jRZ7DTdHj02WdDG9z9pMrKp67RtXBx35x9a4/4YeCpfGvjCCB4idNtSJr18cbAeE+rEY+mT2r26L9njwbHMJHudXlXOfLe4TafyQH9a9N0zSrDRtPisNNtIrW0iGEiiXAHv7n36mgC5RRRQAVjX3hLw5qd495f6BpV1dSY3zT2ccjtgYGWIJPAA/CtmigCvY2Fnpln
HZ2FrBaWsedkMEYjRcnJwowBkkn8azr7wl4c1O8e8v8AQNKurmTG+a4s45HbAAGWIyeAB+FbNFAFexsLPTLNLSwtYLW2jzshgjEaLk5OFAAGSSfxqnqXhrQtZuFuNU0TTb6dUCLJdWqSsFyTgFgTjJJx7mtSigClpukabo1u1vpen2ljAz+YYrWFYlLYA3YUAZwAM+wrI1f4f+E9dkaXUdAsZZWOWlWPY7fVlwT+ddJRQBwsHwb8AW8gdPDsZIOcSXEzj8mciu0tbS3srdLe1giggQYSKJAqqPYCpqKACuX1v4c+EfEVy9zqehWstw/LyoDE7H1LIQSfrXUUUAchpvwt8EaTMJrTw5Z+YpyDPumx9N5NdcQCCMDFLRQBxV78JPAeoStLP4ctlYnJ8h3hH5IwFJa/CLwFaOHj8OW7Ef8APWSSQfkzEV21FAFax0+z0y3FvYWlvawA5EUEYRc/QcdqfcWsF5bPb3UMc8Eg2vHKgZXHoQeoqaigDldK+G/hHQ9ai1jTNFitr6LdskSSTC7lKnC7tvQkdK6h0WRGR1DIwIZWGQQe1OooA5CL4XeC4NWt9Ug0GCG8t5VmieF3QK6nIO0MF4I9K39U0HSNb8r+1dKsb/yc+X9qt0l2ZxnG4HGcDOPQVoUUAZ2maDpGieb/AGVpVjYGbHmfZbdIt+M4ztAzjJ/M0anoOka35X9raVY3/k58v7VbpLszjONwOM4HT0FaNFAGfpehaRonm/2VpVjYedjzPstukW/GcZ2gZxk4z6mn6lpGm6zbLb6pp9pfQK4dY7mFZFDAEAgMCM4JGfertFAGXpvhrQtGuWuNL0XTrGdk2NJbWqRMVJBxlQDjIHHsK574qeLLrwb4Il1OwaNb1p4oYTIu5clstkd/lVq7WsnX/DOjeKLEWWtWEd5bhtyq5KlT0yCCCDye/egDw61/aWvEjAu/DEEsndorwxj8ijfzrzzx98SNV+IF3A15FFbWdtnybaIkgE9SSfvHgDPHH1Ofcrj9nnwZM+6OfVrcf3YrhCP/AB5Ca1NG+CPgfR5lmOny38iHKm9l8xfxUAKfxBoA5T9nfwvcWGl6j4huoigvtsNruGC0aklm+hOAP9017fRiigArGvvCXhzU7uS7v9A0u6upMb5p7OOR2wMDLMpJ4AHPpWzRQBT03SdO0a3a30uwtbGBnLtHawrEpYgDJCgDOABn2FXKKKACs/VNB0jW/K/tXSrG/wDJz5f2q3SXZnGcbgcZwM49BWhRQBnaZoOkaJ5v9laVY2Bmx5n2W3SLfjOM7QM4yfzNaGOOtLRQBxt98KPAuoXHnz+HLRXzn9yXhH5IQP0rsqKKAOS1X4Y+C9anae98PWjSucs8W6EsfUlCM11uKKKAM/VtC0rXrX7Nq2n217COQs8YbafUE9D7iuWt/g74BtZxNH4dhZwc4kmlkX/vlmI/Su5ooAYkSRIqRqqooCqoGAo9AKfRRQBn6noek60sa6rpllfLGSYxdW6yhCeuNwOM1Jp2lafpFsbbTbG2soCxYxW0KxLuPU4UAZ4H5VcooAp6jpOnaxbrb6nYWt7ArBxFcwrKoYd8MCM8nn3pmmaJpWipImlaZZ2KyEF1tYFiDEdM7QM1fooAKKKKAEIyc1ga94G8M+J5PN1jRra6lxjzSCkmPTepBx+NdBRQBxdj8JPAenyiSDw5bMw/57u8w/J2IrswABgcD0FLRQBj654V0LxJEses6Va3oThGlT51Hsw5H4GsKx+EfgPTpxNB4ctmYHOJ3kmX/vl2IrtaKAEAwMCloooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiig
ArzTUPjv4Gss+TfXN6R2t7Zv5vtFemCvgCgD7+ooooAKKKKACiiigArF8VeJrDwj4fuNY1F8QxDCoD80rn7qL7n/AOv0Brar5B+L3jF/FvjW4WGUtp2nsba2AOVOD8zj/eI6+gFAH074T8aaJ4004XmkXQcqB5sD/LLCfRl/qMg9jWNrXxg8FaFeXFnc6q0l3byNHLBDbuxVlOCM425BHrVP4N+DV8K+CoZ54wNR1JVuJyRyqkfIn4A5+rGvmfx3/wAlD8S/9hW6/wDRrUAfaOk6lFrGj2OqW6usF5bx3EayABgrqGGcEgHB9T9au1z/AIE/5J54Z/7BVr/6KWugoAKKK+cfGHx28U6d4l1bSLC306CKzvJrdJTEzuQjlcnLY7elAH0dRWR4UvrnU/B2iahePvurqwgmmfaBudo1YnA4HJNa9ABRXzj4v+O3irTvEmraTYW2nQRWd5NbpKYmd2COVBOWx29K938KX1xqfg7Q7+7k8y5utPgmmfaBudo1LHA4HJPSgDXooooAKKKKACiiigAooooAKKKKACszxFrUHhzw7qGsXI3RWcDS7c43kDhc+pOB+NadeGftG+IvI0zTPDsT/PcSG6nAP8C8KD7Elv8AvmgBNL/aIuNW1az0228IZnupkhj/AOJj3YgD/ll717pXzL+zz4d+3+L7vW5UzFpsO2Mn/nrICB+Sh/zFfTVAGX4j1uDw54d1DWLgborOBpducbyB8q57ZOB+NeP6V+0Pcavq1np1r4R3T3UyQxj+0e7EAf8ALL3pf2jvEPkaVpfh6J8PcObqcA/wL8qg+xJY/wDAK5j9nfw99v8AF15rcqZi02HbGT/z1kyP/QQ/5igD6aooooAKhurqCxtJru6lSG3hQySSOcKigZJJ+lTVzvjvQJvFHgjVdGt5FjnuYh5TMcDerBgD7EqB+NAHN+HvjX4R8Ra1HpUMl3azzPsge7iVElbsAQxwT2zjnjrXo1fH/hb4VeLdW8R21tcaNe2NskqtPcXUTRqiA8lSR8x9AM5+nNfV3iLUpNH8MatqkSK8llZzXCo3RiiFsH8qANKivmjQvjd4x17xlodg8llbWt1qEEMsdvb/AHkaRVYZcsRwTyK+l6AIL27g0+xuL26lEVvbxtLLI3RFUEkn2AFcFB8avBt5rVlpdhc3d3PeXEdvG0dsyrudgoJL7eMn3rpPHf8AyTzxN/2Crr/0U1fIPgT/AJKF4a/7Ctr/AOjVoA+3qKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIri4itLaW5ncRwxIZJHPRVAyT+VeDL+0uzMFXwgWJOABqPX/yFXV/HvxD/ZHgA6fG+LjVJRCBnkRr8zn/ANBX/gVeMfBXw9/b3xJsXkTdb6eDeSZHGV+5/wCPlT+BoA+uh0560UUUAFFFFABRRRQAUUUUAFFFFABXlXxS+L0Xg2Q6RpMcd1rJXdIZOY7cEcbgOrEc4/E+h9Vr4Iubqa8u5rq5kMk8zmSR26sxOST+NAHQ3PxG8aXdwZpPFGqq2c4iumjX/vlSB+ld74F+O+s6ffw2fieX7fpzsFNyVAmhHrx98DuDz6HtXo+l/Er4TaLpSaZp+owQWipsMa6fN8477v3fzE9yetfN3ixNFj8U6gPDtx52kNLutn2MuFIB24YBuCSvPpQB9x18g6l8Z/Heou3/ABOjaof+WdrCiY/4Fjd+tfSHwt1GTVfhloF1K26QWwiLE5J8slOf++a+L6AOjPj7xiX3HxVrWfa+kx+W7Fd/4H+O2t6dqMFr4lnGoaa7BXnZQJoR/eyMbgO4Iz6GvpWW0tprdraW2ikgZdpiaMFSPQg8V8M6/p40jxFq
emhiwtLuWAE9Tscr/SgD7s7Zr51+Mnj7xf4f8b3Gk6fq8lpp5hjliSKNA3K8/NjP3g3evdPCt2994P0S8kOZLiwglYnuWjUmsLxd4p8DeGNQS719rE6oIwkYFuJbjZkkDgEqMk4zgcmgD5ZX4geMVk8weKdZJ972Qj8s4r2L4VfGjUNW1m38P+JnjlkuTstr1VCsX7I4HHPQEAc+uci1rPxo+HnibTptL1TSdTe3mQp5klrGdmR95TvJBHByBXzrbztbXUU6Eq8Th1I6gg5oA+2fHf8AyTzxL/2Crr/0U1fFOn31xpepWuoWjhLm1mSeJiAdrqQynB4PIFfa3jv/AJJ54l/7BV1/6KavjzwZBDdeOvD9vcRJLDLqVskkcihldTKoIIPUEdqALUnxD8Zyz+a3inV9+c4S7dV/75Bx+lerfDT44X1xqdvoviqWOWO4YRw6hgIyOeAJMcEHgbsDHfPb3W90nTtRsHsbywt57VlKmGSMFcfTHFfDerWD6VrF7p0hy9pcSQMcYyUYqT+lAH3iOpr4Br7x0S//ALU0HTtQIx9qto58em5Q39a+DqAPv6vKvin8XYvBkv8AZGkxxXOssoLmTmO3BHG4d2Pp6cntn1U18EXNzLe3ctzO5eaZ2kkc9WZjkn8yaAOhufiN40u5zNJ4o1ZWPaK5aNf++VIH6V3vgb476zp9/FaeJ5ft+nOwU3OwCaEdM/LjePUHn0PGK9G0r4l/CfRNKj0zT9SghtETYUXT5/m4xlv3fzE9yetfN/ixNFj8UaiPDs/naQZd1s21lwpAO3DAH5SSvPXH40AfcQ7dc0tcj8LtRk1X4Y6BdSsWcW3kkk5J8tjHz7/LXXUAZviDVBonhzU9UbH+iWsk4B7lVJA/EiviTQtLfWfEGnaWhIa8uY4AR23MBn8M19Z/GWVofhLrzKcEpEv4GZAf0NfO3wahWb4taCj4IDyv+KwuR+ooA+w6+IfHf/JQvEv/AGFbr/0a1fb1fEPjv/koXiX/ALCt1/6NagCQePfFqWltaw+I9TggtoliijguWiVUUAKMKR2Heur8K/G/xToV5GNTu31bTycSRXGDIB6q/XP1yP519E+ArS2T4eeHglvCol0y2Z8IPmJiUkn1zXyl8StFt/DvxE1rTLRBHbRzB4ox0RXUOFHsA2B9KAPtCKSOaNJYnDxuoZXU5DA8gj1r4l8df8lD8Tf9hW6/9GtX1N8H7uS9+FGgSyEllieLJ9EkdB+iivlnx1/yUPxN/wBhW6/9GtQBr2Pxe8daZp1rYWeu+XbWsSwwp9kgbaigBRkoScADrXv/AMFPFGs+LfB13f65efa7qPUHhV/KSPCCOMgYQAdWP51c8GeDfC114F8P3Fz4a0eaeXTbZ5JJLGJmdjEpJJK5JPrXX6bpOm6Pbtb6Xp9pYwM5do7WFYlLEAZIUAZwAM+woA+LfHf/ACULxL/2Fbr/ANGtWxYfF7x3pmnW1hZ675draxJDCn2SA7UUAKMlMnAA61j+Ov8AkoXiX/sK3X/o1q+ovBng3wrdeBfD9xceGdGmnl0y2eSWSwiZnYxKSSSuSSec0AU/gr4o1nxb4NvL/W7z7Xcx6g8Kv5aR4QRxkDCgDqxPTPNeReOPi740g8WazplpqotLW0vZreNYYEDbUdlGWIJzgetfTGm6Tp2j2zW2mafa2MDOXMdtCsaljjJwoAzwOfavi7x3/wAlD8S/9hW6/wDRrUASP4/8Yu+4+KtZz14vZAPyBxXaeE/jx4k0e5jj1xxq1gWAfeoWZB6qwxuPs2c+or6C8CwxH4deHFMSFW0q2LAqMHMS5zXzp8bfBdn4T8V28+mQrBYajG0iwr92N1OHCjsOVOO2T0GKAPqi1u4L20iu7WVJoJkDxyIcq6kZBB9MV458WPjHdeG9Rk8P+HfLF/GB9ou3UOISRkKgPBbGCSQQM4+j/wBnLWJbvwpqmlyMWWxuVePJ+6sgJx9Nysf+BGvFfiZYT6d8SvEM
VwrK0l9JOpPdXJdSPwagCvJ8QfGUkpkbxTrAbOcLeSKPyBxXr3wP8a+LfE3ia6sdU1V7zT7e1MrCWNS27coUb8Z7k8ntVbwF8eNO0jQrDRdd0yaNLOFYEubXDBlUYBZDjBx1IJz6V7jouuaR4hs/t+j30F5C2FMkTZI9mHUHnoaAPM/jN8T9T8G3Vno2iCOO9uIftElxKgby0LFVCg8Ekq2cg4wOOeD4M/E/U/GdzeaRrYjkvbeH7RHcRoE8xNwVgwHGQWXpjg9OOen+IXwz0z4gwW7XE8lnfWwKxXMahvlPVWXjIzyORgnryRR8Pfhnpnw/gneCeS8vrgBZbmRQvyjnaqjO0Z56nPHPAoA7ivin4ieIf+Eo8eatqavugaYxQY6eWnyrj6gZ/Gvrnxnr6eGPB2qawWAe3gYxA95D8qD8WIr4w8P6PNr/AIh0/SYMiS7nSLI/hBPLfgMn8KAPrX4SeHv+Ec+HGlwOm24ul+1zf70mCAfcLtH4V29ArC8Z68vhjwdqusFgHt4GMWe8h+VB/wB9EUAfI3xD8Q/8JR471bU1fdA0xjg548tPlXH1Az+NfT3wk8Pf8I58ONMgdAtxdL9sn7EtJyM+4XaPwr5M8P6PN4g8Q6fpEGfMu50i3AZ2gnlvwGT+FfXXxV1KXSfhhr11AxWTyBCGHBHmMsZx7/NQB5H47+PmoT3s1j4S2W1ohK/bpEDSSkcZUHIVfTIJ6HjpXmTfEDxk0nmHxVrIY9heyAflnFL4B8Px+KfHOk6NOxWC4lJlwcEoqlmAPuFIr7QgsbS2t1toLWGK3AwIo4wqgemBxQB8k6b8ZvHenOv/ABO2uUHVLmJHB/HG79a+p/Fl7cab4O1y+s5PKurXT55oXwDtdY2KnB4OCB14r4aNfb3jv/knniX/ALBV1/6KagD538KfF3x1qfjLQ7C713zLa61CCGZPskA3I0ihhkJkcHHFfQ/jr/knnib/ALBV1/6KavkHwJ/yULw1/wBhW1/9GrX1/wCO/wDknniX/sFXX/opqAPimwvbjTtQtb+0k8q5tpVmhkwDtdSCpweDyO/Fdv8A8Ls+If8A0MP/AJJW/wD8brnPBcMV1458PW9xEk0MmpWyPHIoZXUyqCpB4IIPSvsH/hBfCH/QqaH/AOC6L/4mgBfHf/JPPEv/AGCrr/0U1fE9leT6ffW97aymK5t5VlikHVXU5B/AgV9seO/+SeeJf+wVdf8Aopq+PfBEaTePvDkciK8b6pbKysMhgZVyCO9AFhPiF4yWbzf+Ep1jdnODeSEflnFe2fCj4yXfiHU4vD/iPy2vJQRbXaAJ5jAZ2uBwGIzgjGcYxzXqXibw3p/iTw9d6Xd2kUiyRMsZKDMbkcMp7EHFfEthdvY6ja3cRxJbzLKpHYqQR/KgD7d8WXtxpng3XL+zk8q5trCeaF9oO11jYqcHIOCB1r5x8J/F7x1qXjLQ7C71zzLW51CCGaP7JANyNIoYZCZ6E9K+h/Hf/JPfEv8A2Crr/wBFNXyB4E/5KH4a/wCwra/+jVoA+3elfIX/AAur4hH/AJmH/wAk7f8A+Ir6/r4AoA+3/Hf/ACTzxL/2Crr/ANFNXxTp99caXqVrqFo4S5tZkniYgHa6kMpweDyBX2t47/5J54l/7BV1/wCimr488GQQ3Xjrw/b3ESSwy6lbJJHIoZXUyqCCD1BHagC1J8Q/Gcs/mt4p1ffnOEu3Vf8AvkHH6V6t8NPjhfXGp2+i+KpY5Y7hhHDqGAjI54AkxwQeBuwMd89vdb3SdO1GwexvLC3ntWUqYZIwVx9McV8N6tYPpWsXunSHL2lxJAxxjJRipP6UAfeA6mlqjot//amg6dqBGPtVtHPj03KG/rV6gDhfiR8SrDwBp8YMYutVuFJt7UNjj++57L+pPA7kfNWpfFDxtqk5lm8S6hFnottKYFH4JisrxZrcviLxXqmrTOXNzcMyZ7IDhB+CgCvq/wCHngPTPB/hyzRbKJtTeJXu
rlkBdnIyQCeijOABjpnqSaAPFPhZ4+8a6n460vRn1ye6tJ5SZ0ugJT5aqWbDEbgcA9+pr6E8WeKdO8HaDNq+pyERIdqRpgtK56KoPfg/gCa0E0uwTUBfrY2wvApQXAiUSbTjI3YzjgflXzJ+0DrMt98Qhppc+Rp1uiKnYM4DsfqQVH4CgDE174v+NNcuXkGsS6fCT8sFiTEqD03D5j+JpuhfF7xrodykn9szX0QPzQ3zecGH1PzD8DXrXwC8G6fF4Y/4Sa5tY5r66mdYHkUN5Uanb8uehLA8+mKT4+eDNPl8Mf8ACS2trFDe2kqLO8ahfNjc7fmx1IYrg+maAPT/AAl4q07xj4fh1fTXJjf5ZI3xuicdVbHf+Ywa4L42+O9e8Fw6MmhzxQNe+f5sjxByNnl4xnIH3z29K8x/Z/1iWx+In9nBz5Oo20iMnYsg3g/UAMPxrqP2menhb/t7/wDaNAHlV18SfGt25aTxRqin/plcNH+i4rQ0P4u+NNDuUlGtXF9GD88N85mVx6ZPzD8CK9D/AGaEVm8TsUBZfsuDjkf67/Csn9ojw9ZaV4g0rU7O3jgbUIpFmEagBnjK/MfchwPwFAHOeIfjR4z12d/K1I6ZbE/LDZfIQP8Af+8T+OPYVnaT8VPG2k3Cyx+Iry4APMd3IZlYenzZI/DBr0z9myK2nTX2ktoGngeApMYwXUMHyA3UD5elY/7RHh6y0rxDpep2dvHA2oQyLMI1ADPGV+bA7kOPyoA958HeKbPxj4ZtdZsgUWUFZYicmKQfeUn+vcEHvW9Xg/7NFyzWHiO1LHZHLbyAdgWDg/8AoI/Kvb9QvoNL026v7ptsFtC00jeiqMn+VAHyl8bvEP8AbnxIu4I33W+mqLNMH+Icv+O4kf8AARXr/wAAfD39leBG1OVMT6pKZMnr5SZVB+e4/Q18vRpNeXaRoGlnmcKB1Z2J/mSa+8rW1hsrOC0t0CQwRrHGo/hVRgD8hQBNRRRQAUUUUAFFFFABRRRQAUUUUAFfAZBRirKQwPIPBBr78rwz4r/Bi61jUZ/EPhmNGuZjvurIsF3t3dCeMnuDjnnvQBa0/wCAngPV7CG+sNa1i4tplDJJHcQkEf8Afrj6VO/7O3g2JGeTVNaVFGSzXEIA/wDIVfNt3p19p8pivbK4tpAcFJomQj8CKuab4Z13WHCabo9/dFjjMUDMPxOMD8aAPtHwx4cs/Cfh200SweZ7W1DhGmILncxY5IAHVj2FfDFff1fEP/CCeL/+hV1z/wAF8v8A8TQB9v18QeO/+Sh+Jf8AsK3X/o1q+3sivj3xp4L8U3XjrxDc2/hrWJYJdTuXjkjsZWV1MrEEELggjkGgD6j8C/8AJPPDX/YKtf8A0UtfEs00lxNJPM7ySyMWd3bJZjyST6k19u+DIJrXwL4et7iKSGeLTbZJI5FKsjCJQQQeQQeK+X/G/wAJfEPhXUrh7WwuNQ0osTDcwIZCqdhIBypA74wexoA91i+BngGK2ET6VNNIBjznu5dxPrgMFz+FfJFSGNw+wo27ptxzWvp/g/xJqxH2DQdRuFP8SWzlR9WxgUAfYnjv/knniX/sFXX/AKKavkDwJ/yULw1/2FbX/wBGrX2L4vtJ9Q8Fa7ZWsRlubjT7iKKMdXdo2AH4kivi++8M67pb4v8ARtQtiP8AntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe8eH7BtK8N6Xpz/etLSKA/VEC/wBK+EsV9+9K+O/GXwt8R+E9RnAsLi800OTDeQRl1Kdt+PunHUH9aAPsTcCMgg18ClWQlWUhhwQR0NfSH7OGn+T4X1m/ZSGnvFh9OI0B/wDah/Kq/wAWPg1c6zqM/iHwyiPdTfPdWZIXzGxy6E8ZPcHqee9AFnTv
gL4C1ewhvtP1vWLm1mXdHLHcQkEf9+vzHbvViT9nfwZDG0kuq60iKMszXEIAHufLr5uvNPvdOm8q9s57aQcFJomRh+BxVvTvDGu6u6rp+j39yW7x27EficYH1oA+0fC/h2z8J+HbTRLB53trUNsadgXO5yxyQAOrHtWvRRQByPxSsW1H4YeIYFBJFqZcD/YIf/2Wvl34XXy6d8TvD87HAN2sOT28wFP/AGavs6SNJYnjkUMjgqynoQe1fB17ZXWkapcWdwpiurSZo3HdXU4P6igD7zr4h8d/8lC8S/8AYVuv/RrV9p6ZqNvq+l2mo2j77e6iWaNv9lgCP518keM/Bfiq68deIbi38M6zNBLqdy8ckdhKyuplYgghcEEc5oA+o/An/JPPDX/YKtf/AEUtfMHxt/5K9rv/AG7/APoiOvqLwZBNa+BfD1tcRSQzxabbJJHIpVkYRqCCDyCCMYr51+L/AIT8R6n8UtZvLDw/qt1ayeTsmgs5HRsQxg4IGDyCPwoA9n+CX/JINC/7eP8A0okr5j8fxPH8RfEqspBOp3Dc+hkYj+Yr6k+ENheaZ8LNGs7+0ntbqPz98NxGUdczyEZU4IyCD+Ned/Gn4V6nqmrv4n0C2a6aVALy2j5k3KMB1H8XAAIHOeecnAB6n8OL6C/+G3h2W3kV0SwhhYg9GRQjD8CprqK+B5IJo5fKkhkSQcbGUg/livo79nix1fTtJ1mHUNOurW2lkilt3niKCQ4YNjP0XmgDw3x9E0XxE8SKwwTqly34GRiP0NfWfw3voL/4b+HZYJFdY7CGFiD0eNQjD81NeXfGn4WanqmrN4n0C2a6eVALy2j/ANZuUYDqP4uAAQOcjPOTjwF4JopfKkhkSQHGxlIP5UAfe+RjPaviLx3/AMlD8S/9hW6/9GtXun7PFjq+naRrMWo6ddWtrLJFLbvPEUEhIYNjPXoteF+O/wDkofiX/sK3X/o1qAPr7wIQfh54aGf+YVa/+ilrwj9orXrXUPE2maTbypI+nQyGYqc7HkK/KfcBAfxrye70jUrCGCe70+6gimRZYpJYWVZFYZDKSMEEEVr+GfAfiTxZdxw6ZpkxiJ+a5lUpEg9Sx4/AZNAHs/7NlhJFoWu6iy4Se4jhU46+WpJ/9GCvUfFPg7QvGNitrrVkswQny5VO2SIn+6w6fTkHjir2gaJZeG9BtNIsEK21qmxc9WPUsfcnJPua+SPGGl+LLDxhqur3WmanYPcXcsyzLG4ADMSMOvHTHQ0AdT44+BOpeG9MutW0q/TULG2RpZY5F2SxoOSfRgByTwfauS+Fuq3Ok/ErQpbd2X7RdJayAHhkkIQg/mD9QKy59f8AE+twGxuNW1i/jf8A5YSXEsob/gJJr174PfCXVbLXYPEniG1azS2y9rayD9474wGYfwgZyAeScenIB7frviPR/DNiLzWtQhs4CdqtITlj6KByx+go0LxHo/iaxN7o2oRXkAO1mjyCp9GU4IPsRXh37RPhzVbjVtO123t5Z9PjtPs8pjUsIWDs2Wx0BDdeny0v7Ovh3VbfVdQ12e3lh0+S1+zxNIpAmYurZX1ACkZ6fN9aAJP2kPEPOk+HIn9bycfmqf8As/6VR/Zz8O/aNb1LxBMmUtIxbwEj+N+WI9wox/wOvLfGevN4n8Y6rrDElbmdjHntGPlQf98gV9a/Dbw7/wAIv8P9J05023Bi864BHPmP8zA/TO3/AIDQB1deAftH+Iv+QT4cif1vJwPxVP8A2f8ASvf6+H/GWvN4m8Y6rrDElbmcmLPaMfKg/BQKAPUf2cvD32nXNS8Qyx/u7SIW8BI/5aPyxHuFGP8Agde2ePdEfxF4E1nSoRmae3JiHq64ZR+agVX+G3h3/hGPAGk6c6bLgxCa4BHPmP8AMQfpnH4V1ZoA+FfD+tXXhvX7HWbPHn2kokUHow7qfYgkH619Az/tIeH1st1vompyXWP9XIY0jz/vgk/+O1veMPgp4a8VXst/E02m
X0pLSSW4GyRj1LIe/uMZOc5rjx+zvpGlQzX2seJ7iSyt0aWTyrZYSFUZOWLN2HpQB8+n2r7e8d/8k88S/wDYKuv/AEU1fE1vC1zdQwIpZ5HVFUdyTivtzxnBNdeBfENvbwyTTzabcxxxRqWZ2MTAAAckk9qAPj3wJ/yULw1/2FbX/wBGrX2F42jabwF4iiQZd9LuVUDuTE1fLPgzwX4ptfHXh64uPDesQwRalbPJJJYyqqKJFJJJXAAAzX2AwDqQwBGMEHvQB8OeE7yHTvGOh31ywSC21CCaRj0CrIpJ/IGvuXvXxr4x+GXiLwffypNYzXVhuPlXkEZdGXtux90+x98ZGDXKW1ld3switbWaeVjgJFGWJ/ACgD7V8d/8k88S/wDYKuv/AEU1fIPgT/koXhr/ALCtr/6NWvq7UI7+8+Dt1BNaz/2nNoLxvb7CZDMbcgrtHJO7tXzb4M8F+KrXx14euLjw1rMMEWp2zySSWEqqiiVSSSVwABzmgD7Cr4Br7+zjmviH/hBPF/8A0Kut/wDgvl/+JoA+xfGMDXXgjX7dAS8um3CKB3JjYV8X+Gr9NJ8U6RqM3+qtL2Gd/ojhj/KvuknJxXyL46+EniDwpqM72djPf6SWJhuIELlF9HA5BHr0P6AA+usjFfANSCORn2+W5fONuOa3NN8D+KdXYCx8P6jKp/j+zsqf99EAfrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yULw1/2FbX/0atfYvi+0n1DwVrtlaxGW5uNPuIoox1d2jYAfiSK+L77wzrulvi/0bULYj/ntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe12k9j4O8F6eurXkNpb2FnDBJLKwABVAuPc8dB1p3h3xj4e8WRSPoeqQ3flY8xQCrrnplWAIHvivNv2g/DmqavoelX+nwS3EVhJL9ojiUsQHC4fA7Dafpn61wHwH8OardePbfWo7aVNOs45fMnYEI5ZCgQHucnPHTH0oA811vT30nXtQ06RSHtbmSEg/wCyxH9K+0fB/iWz8V+GLLVbWZHMkaiZQeY5ABuU+hB/TB6GuB+LfwkfxfINb0QxprCIElic7VuVHTnoGA454IwOMV816ho+p6TIYtS066tJF42zwsh/UUAfd2QOpFfKXx90yWy+Js92yny7+2imRscHavlkf+OfqK5LwnpWuTeIdOvNK0m9u3tbmKYeRAzAbWDZJAwOnevq/wCIHgaz8eeHjp1w4huYm8y1uNuTE+MdO6noR9D1AoA5H9n/AMQ2t/4EGi+aovNOlfMWeTG7Fw30yzD8PpR8f/ENrY+BDovmqbzUZkAiz8wRGDlsemVUfjXzvrvhLX/DV09vq2lXVuVON5QmNvdWHB/Om6J4T17xJdJBpGlXVyWON6xkIvuzn5QPqaAO1+Ammy3nxPtrpVOyxt5pmbsMqYx/6H+ldh+0z08Lf9vf/tGvUvh74Gs/Afh0WELia7mPmXVxjHmP2x6KBwPxPevPf2htB1jWx4c/snSr6/8AJ+0+Z9lt3l2Z8rGdoOM4OPpQBn/sy9fFP/bp/wC1qP2mv+ZX/wC3v/2jWh+zzoWr6J/wkf8Aa2lX1h532byvtVu0W/Hm5xuAzjI/Oj9obQtX1v8A4Rz+ydKvr/yftPm/Zbd5dmfKxnaDjOD+VAGf+zL/AMzT/wBun/taj9prr4X/AO3v/wBo1ofs86FrGif8JH/a2lX1h532Xy/tVu8W/Hm5xuAzjIz9RR+0NoWr63/wjn9laVfX/lfafM+y27y7M+VjO0HGcH8jQBn/ALM3TxTn/p0/9rVv/tCeIv7O8G2+ixPibU5vnH/TKPDH/wAe2frVD9nnQtX0T/hI/wC1tKvrDzvs3lfard4t+3zc43AZxkfnXlfx
g8Q/8JD8SNSkR91vZkWcP0ThvzcsfxoA1fgT4e/tn4hRXsibrfS4zcHPI3n5UH1ySw/3a+rq81+Bvh3+xPh1b3UibbjU3N0+Rzs+6g+m0Bv+BGvSh70AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAJj3oxS0UAFIBjvS0UAJijbS0UAJilxmiigBMe9GKWigBMUtFFAARmkAxS0UAFJilooATFLiiigAoxRRQAmKWiigArwv4+fD97yAeLtNiLTQIEv0UctGPuyf8B6H2x6GvdKKAPGv2fIvEdr4euotQs3i0SRhNYyynaxY/e2r1KHg54Gc4zk49lpMUtACY560Y9TmlooAQDFKeaKKACkxzmlooATHuaWiigBMV8R+Ov+SheJT/ANRW6/8ARrV9u0mKAMDwL/yT3w1z/wAwq1/9FLW/ijApaAExS4oooAKTFLRQAm0GuX+IniD/AIRbwHqupq+24WExW5HXzH+VSPoTn8DXU0mKAPh3wjoL+JvFml6MgbF1OquR1EY5cj6KGP4V9x0YooA5f4ieIf8AhF/Aeramj7Z1hMcB7+Y/yqfwJz+FfInhDQX8TeLtL0dASt1OqyEdVjHLn8FBNfcZGaTHOaAFpsjiONnIOFBJwMninUUAfCFnqmqaPKwsr68sZAfm8mVojn3wRT9R17V9YCjU9Vvr4KcqLm4eTB/4ETX3XiloA+cPgz8LNQm1q28S65aPbWdqfMtYZ1w80nZtp5CjqCepxjIr6P8A0oxRQAm3ilHFJmlFACY5paKKACiiigApMdaWigBMUEZpaKADHvSYpaKAExS0UUABGaQDFLRQAUgUClooATHHWloooATFLRSZwcUAGKXHvRRQAUhGaWigBMe9GPelooATFGKWigDP1vVYdC0K+1W4P7mzgeZh0ztBOB7npXwzZWc+o6hb2Vsu+e4lWKNfVmIAH5mvvbqMUmM0AJHGkUaxRqERAFVQMAAdAKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFVrvULKwXfeXlvbL6zSqg/WsyPxl4WllEUfiXR3lPARb6Ik/huoA3KKQHP0o780ALRSUtABSHijPHXtXkf7RMskfw8svLkZd+qRq20kZHlS8H24oA9c5yRS18wfs5f8lCv/APsFSf8Ao2Kvp7nk0AcP8VfGt/4D8LW+p6fbW9xPNeLbbbgMVAKO2cAgn7g7964n4P8AxM8S+NvG15ZavcQG0j095lhhhCAOJIwDnlujHv3rQ/aO/wCSe6f/ANhWP/0VLXnX7Pl3bWPjvUZru4hghGlSZklcIo/exdzQB9SUVkWfinw9qMvlWOvaXcyE4CQXcbnP0DVr0AFFFQ3V1b2cJnuriKCFfvSSuFUfUnigCaismx8TaBqc/kWGuabdzf8APOC7SRvyBJrWoAKKSqF5rukacxW+1WxtiOonuUTH5mgDQorGt/Fvhu8lEVr4i0meQnASO9jYk+mA1bIoAKKKKACiioLm8trOLzbm5hgj/vSuFH5mgCeisL/hNPCu/Z/wk+jb/wC79viz/wChVto6yIro6srDIKnINADq+XfEvx58Vz+IJ20W4hstNjlKxR+QkhkUHq5YE5PoMYz+NfUR6V414j/Z80nWNem1Gw1aXTop5DJLbiASKGJydh3DaD6c4+nFAHpfhLxBH4p8KadrccXlC7i3NHnOxgSGGe+CDXhnjD47eKdO8SatpFhbabBFZXk1skpiZ3IRyuTlsdvSvoLSdMtNG0m102wiEVpbRiOJAc4A9+59TXxZ47/5KF4l/wCwrdf+jWoA+x/Cl9can4O0PULuTzLm60+CaZ9oG52jVmOBwOSeleP/ABK+M/iLwv4z1DQNLtNPWK2EeJpo2dzujV/7wHVsdK9Y8C/8k98Nf9gq1/8ARS18wfGz/kr2uf8Abv8A+k8dAH0h8MNc1DxJ8O9L1fVZhNe3HneZIECg7ZnUcAAdABXXVwHw
S/5JDoX/AG8f+j5K7+gAoqlf6xpmlKG1HUrSzU9DcTrGD/30aTT9Y0zVkL6bqVneqOrW06yAfkTQBeopOaOvQ9aAFooHSk/GgBaKKTPPFAC1Fc3EVpaTXNw4jhhQySOeiqBkk/gKk9a8r+PniL+yPAX9mxPi41SURYzz5a/Mx/PaP+BUAfPB8deMZpePFGtlnPAW/lHJ7ABv0r7UgjSw0+GJ7hmSGJUMs75ZsDGWY9T7mvk74KeHf7e+JFlJIm6304G8kyOMqcIP++yp/A161+0d/wAk8sP+wrH/AOipaAPRZfGHhm3OJ/EekRH0e9jH82qWz8TaDqDhLLW9NuWPRYbtHP6GvibRNG1DxDq0Gl6Vbm4vZ9wjiDqu7apY8sQBgAnr2ra134c+LfDdm13quiXEFsv3pUZZVX/eKEgfjigD7T5xWL4v8Qp4U8J6jrckXm/ZItyx5wGYkKoJ9CxAr5T+HfxF1PwRrMGJ5JdIkcC5tCSV2k8so7MOvHXGK+utV0uz1rSrrTL+LzbS5jMciZ6g+nofQ9qAPmzwz8ePFUPiG3Ot3EN7p00oSWPyEQxqTyVKgHI9Dn+tfRviHU5NG8M6rqsUaySWVnNcKjHhiiFgD+VeXeHP2ftI0XXodSvtWl1GKCQSw23kCNSQcrvO47hnnAxn6cV6J46/5J54m/7BV1/6KagDwjQvjf4x17xlolhJJZW1rdahBDLHb2/3kaRQwyxYjgnkV9L18Q+Bf+SheGv+wra/+jVr7fNAHypqH7QPja7yLc6fYjsYbfcR/wB9lv5V9VV8A19/UAFFIT3rHufFnhuynNvdeIdKgmHBjlvY1YfgWzQBs0VHFNHcRLLDIkkbchkYEH6EU8nFAC0U3nI9KXPFAC0Un60UALXzP45+NHiuHxe1pZw/2TDplyQbVgGaYqefMPdSOw4wep4NfTFeEftF+FlksrDxRbx/vImFpckDqpyUY/Q5H/AhQB7TpGq2muaRa6pYSiW1uYxJG49D2PoR0I9avV86fs6+KWt9VvvDM7/urlftNuCeBIvDAfVcH/gFfRdABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFHaijtQB8BV2178JPHen2jXM/h2cxKMt5Uscrf98oxP6VxFff1AHxP4M8b6v4J1dLzTp2MJYfaLVmPlzL6Edj6MOR9Mg/U3xG0ePxb8NdTgtiJme3FzasvO5lw64/3gMf8Cr5m+LGmwaV8UNetbZQsXnrMFHQGRFkOPbLGvpP4O3Ml18J9BklJLCKSMZ9EldR+gFAHy14H1z/AIRvxtpGrFtscFwvmn/pm3yv/wCOlq+2Lq5isrOe6nbbDBG0kjeiqMk/kK+IvGPh+Twt4u1LRpAdttMRGT/FGeUP4qQa968e+PF1D4BWmoJKPtWsJHavtPRx/rePT5GH4j1oA+bIo5J50ijUtJIwVVHUk9BX0z+0PDHb/DXS4IlCRx6nEiKOgAhlAFeV/A/w+db+JFpPIm6301Gu3JHG4cJ+O4g/8BNer/tHf8k8sP8AsKx/+ipaAPm3TNJ1HWLhrfS9Pur6dULtFawtKwXIGSFB4yRz7iuv8GeDfFNr468Pz3HhrWIYYtSt5JJJLCVVRRIpJJK8ACuj/Zx/5KHf/wDYKk/9GxV9P0AeQftHf8k90/8A7Csf/oqWvnXQfD2reJ9QNjo1lJeXIjMhRCBhcgZJPA5YfnX0V+0d/wAk90//ALCsf/oqWvP/ANnH/koWof8AYKk/9GxUAefeI/BniHwnJGNc0uWzEpwjlldGPoGUkZ9s17J+z742urmW48KX0zSxxxedZs5yUAIDR/TkEemDXcfHGFJfhLq7sMmJ4HXPY+ci/wAmNeDfBFivxc0QDownB/78SH+lAH0/4y8UW3g/wte61cjf5C4jizgySHhV/E9fQAntXxlPPq/ijXDJK1zqOp3cmB1d3Y9gB29h
wK9z/aXuZEsfDlqCfLklnkYdsqIwP/QzWd+zXYwyar4gvmUGaCGGJD3AcuT/AOgLQB5h4h8A+KfC1ql1rGjzW1u5AEoZZEBPQFlJA/GvT/gb8SrtNUi8J6xcPNbTgixllbLRuBny8nqpAOPQ8Drx79qdlDqWl3dhcKGguIWikUjgqwIP86+FLG5ks7+3uoSRLDKsiEdiCCP1FAH1v8bf+SQ67/27/wDo+OvlDRtHv/EGqw6Zplsbm9n3eXEGALYUseTgdATX1f8AG3/kkOu/9u//AKPjrwD4Jf8AJXtD/wC3j/0RJQBi+IfAHinwrbLda1o01tbsQvmh0kQE9AWQkA/Wu++A3ja70/xKnhi6nZ9Ovt3kKx4hlAJ+X0DYII9ce9e7/ECFJ/h14kRwCBptw4z6rGWH6gV8jeAnKfETw0VOD/alsPwMqg0AfblFeAftNf8AMrf9vf8A7RrwCgD7/FfAFe//ALM3TxR/26f+1q8AoA7qX4O+P4rdpn8OSlAMkJPEzfgoYk/gK57w94k1bwnqyahpN29vOhwy/wAMg7qy9CPr9RivuWvlP4/6dBYfEoywqFN5ZRXEgA/iyyfyQUAfTuiava69o1nqtk5a2u4hKmeoz1B9wePwr4w8d/8AJQ/Ev/YVuv8A0a1fQ37PFzJP8N5o3JKwahLGmewKI382NfPPjv8A5KH4l/7Ct1/6NagD6/8AAn/JPPDX/YKtf/RS18geO/8AkoXiX/sK3X/o1q+v/An/ACT3w1/2CrX/ANFLXyB47/5KF4l/7Ct1/wCjWoA+v/Av/JPfDX/YKtf/AEUtfMHxt/5K9rn/AG7/APoiOvp7wL/yT3w1/wBgq1/9FLXzD8bP+Sva5/27/wDoiOgD3/4Jf8kh0L/t4/8AR8lUfjH8Q5fBWiQ2emOq6vf5ET4B8iMfefB784H4ntir3wS/5JDoX/bx/wCj5K8H+OlzJP8AFjU43JKwRQRp7Dylb+bGgDj9D8Oa14pvmtdHsJr24A3vsxhfdmJwPxIo1zw5rXhe/W11iwmsbj7yb8YYDurDg/UGvp34E2UNr8KrGaNcPdzTSyH1YSMg/RBR8drKG6+FeoTSqDJazQyxH0YyKn8nNAEfwb+IcvjTRJrLU3DavYACR8Y8+M8K+PUdD+B718+/E7RD4f8AiLrVmqbYmnM8XHGyT5wB9M4/Ctj4G3EkHxY0uNCQs8c8b+48pm/morv/ANpHQd9rpHiCNOY2azmb2PzJ/J/zFAHqfw81j+3vh9oeoFt8j2qxyN6unyMf++lNfJfj/Wv+Eh8e61qatujluWWI+safIn/jqivWvgb4vi0jwF4niuWBGlg3yKx+8rIRtH/AkH4tXjOkeH7zWdM1i+tlzFpdsLiXjqC6rj8izfRTQB9ffDnWP7d+Hmh35bc7WqxyHuXT5GP4lSfxr5D8Xav/AG94w1fVA25Lq7kkjP8AsbjtH/fOK9h+CHi+PR/AHimKdhnTFN9GrH7wZMbR/wACQf8AfVeVfD7QD4l8eaRpbLuhecSTD/pmnzN+ikfjQB9i+HNITQPDWm6THjFpbJESO7Acn8Tk/jXy18bfEP8AbnxIvIY33W+nKLNP95eX/wDHyw/AV9XajfwaVpl3qF022C1haaRv9lQSf5V8IxRzXl2kUYaSaZwqjqWYnA/HJoA+o/gF4e/snwCdSkTE+qTGXPfy1+VB+e4/8Cqr+0d/yTyw/wCwrH/6Klr1q1tobO0htbdAkMCLHGg/hUDAH5V5L+0d/wAk8sP+wrH/AOipaAPIPgl/yV7Qv+3j/wBJ5K+rdd1Kx0jQr2+1J0SzhiZpd+MMMfd9yemO+a+ExirVppt9qEgjsrK5uXPRYYmcn8AKAKlfb3jv/knviX/sFXX/AKKavFPht8D9SfVbbWPFUC21rAwkSxYhnlYcjeBwq9OOp6YFe1+O/wDknvib/sFXX/opqAPkHwJ/yULw1/2FbX/0atfX/jv/AJJ74m/7BV1/6Kav
kDwJ/wAlC8Nf9hW1/wDRq19f+O/+Se+Jv+wVdf8AopqAPkDwJ/yULw1/2FbX/wBGrX2/2r4g8C/8lC8Nf9hW1/8ARq19vnpQB8AV9/V8A19/etAHyz8Y/iVd+I9cudC064aPRrRzG3ltj7TIpILE91B4A6HGe4xyejfDbxhr+mrqOm6FPNaMMpIzIgceqhiCw+ma5Pmvv1RgADjjigD4a0LX9X8J60l9ptxJaXcLYdTwGAPKOvceoP8AOvsS6Fl478ATC2cNa6tYt5bMPull4z7q36ivnj9oKyhtfiUssSgNdWMU0hHdtzp/JFr1H9ni5kn+G00chJWDUZY4/ZSiN/NjQB85eEtXOg+L9I1UttW2uo5HP+xu+Yfiua+yvGWsjw94M1fVd+17a1doz/00Iwg/76Ir5B+IGg/8I1481jSwmyKO4Lwjt5b/ADJ+SsB+Fet/G3ximq/DnwzFA4B1dVvZVU9FVB8p/wCBP+a0AeMeE9X/ALB8XaTquSEtbqOR/dNw3D8s19f/ABE1f+wvh7rt+G2utq0cbejv8in/AL6YV8f6v4evNF07R7y5XEeqWpuYuMYG9lx9cBW+jCvY/jb4wTWPh74WigcZ1RVvpFU9AqAbT/wJz+K0AeafC/Q/+Eg+I+i2bLuhScTygjI2R/OQfY4A/GvrDxrpiaz4J1rT2UMZrOUJn++Fyp/BgK8l/Zu0HZaav4glTmR1s4GPXC/M/wCeU/75Ne6ylRE5f7oU5+lAHw/4O1JtI8aaLfqcCC9iZv8Ad3AMPyzX3H+Oa+BIgzSoF+8WGD7199mgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACjtRR2oA+Al69M+3rX1m3x28BfYzONSuDKBkW/wBkk3k+mcbf1r5MBxXs91+zf4jScraazpUsOeGmMkbfkFb+dAHlfiLWrjxH4hv9YuVCzXkzSlF6KD0UewGB+FfZ3gzRW8O+DNI0mQAS21siy46eYRl//Hia4LwH8DdM8LX0WqatdDU9QiO6JRHthib1weWI7E4x6ZANem6lqdno+nS6hqNwlvaQgGSVuignHP4kUAeQftDeFDf6JaeJbaPM1gfJuMd4WPB/Bj/4+fSvnJ7mZ7SK1aRjBG7SJGTwrMAGI+oVfyr688V/FDwdpvh27kGrafqjyRMiWdtOspmJGNrbSdqnPJPbPWvk/QtGuvEGu2Wk2S7p7qVY144XPUn2AyT9KAPpz4E+Gzonw+S+lTbc6rIbg5HPljhB+WWH+9TP2gLCS8+GRmRSRaXsU747Ahk/m4r1FEWONURQqqMADoBTbi3iureW3njSWGVCkkbjKspGCCO4xQB8dfCzxjb+CPGkepXqSNZSwtb3BjGWVWIIIHfDKv4Zr6GHxr8DzX1raWmoz3U1zMkK+XbOoUswUE7wvAz78V55rn7N14Ll20HW4GgJysV8rKyj03KDu/75FQaV+zfrBuFbU9esrdFOc2iPK34bguKAOu/aO/5J7p//AGFY/wD0VLXAfs4jHxCv8/8AQKk/9GxV7d8RvAv/AAsDw9b6V/aP2DyrpbnzfI83OEdduNy/385z2rA+HHwgPgDxDcasdd+3+datb+V9k8rGWRt2d7f3MYx3oA0PjZ/ySHXf+3f/ANHx14B8Ev8Akr2hf9vH/oiSvp7xt4Z/4THwhfaD9s+x/avL/f8AleZt2yK/3cjOduOveuA8E/Az/hDvF1jr3/CRfbPsvmfuPsPl7t0bJ97zDjG7PTtQBd+PPhqbXPAgvrWMvPpcv2hlAyTERh8fT5W+imvCvhf46HgPxSbyeKSawuY/JuY48FgM5DKO5B/QmvsbHNeKeLf2erDUbqS88OX4055CSbWVS0IJ/ukcqPbn2wOKAHePPjl4ePhm7svDdzLeX93E0Ky+S0aQhhgt84BJwTjGef18W+HHhubxT470uwSMtAkqz3JxkLEhBbPp
n7o92Fd/Y/s3+IJLgDUNb0yGHPLQeZK2PoVUfrXu/hXwho/gzSf7P0e28tCd0krnMkrert3/AJDsKAOd+NpB+EOuj/r3/wDR8deAfBP/AJK9of8A28f+iJK+nvG3hj/hMfCF9oP2v7H9q8v9/wCX5m3bIr/dyM5246964DwT8Df+EO8XWOvf8JH9r+y+Z+5+xeXv3Rsn3vMOMbs9D0oA9A8df8k98S/9gq6/9FNXyB4FH/FwvDX/AGFbX/0atfZ2u6Z/bXh7UtK87yfttrLbebt3bN6Fd2MjOM5xkV5BoX7PX9i+IdN1X/hKPO+xXUVz5X2Dbv2MGxnzDjOMZxQB7fnPTmiiigBMHnFfAVff4r4AoA+s7X47+A57QTS6hc20mMmCW0kLfTKgr+tfOXj3xZJ418XXestE0MT4jgiY5KRrwAfc8k+5Nejah+zdrsc5Gm63p08PY3KvE35KG/nXT+Dv2fbHSr2O+8R3qai8ZDJaxIViyP7xPLD2wB656UAdj8H9Bl8P/DbTILhClzchrqRSMEFzkAjt8u38a+W/HfPxD8S/9hW6/wDRrV9ugYrxHXf2ef7a8Q6nqv8AwlHk/bbuW58r7Bu2b3LYz5gzjOM45oA9Q8C/8k88Nf8AYKtf/RS18i/EGB7f4jeJEkUgnUp3/BnLD9CK+ydC0z+xfD2maT53nfYrWK283bt37EC7sZOM4zjJrjPiF8JdK8dypffaHsNURQn2hE3LIo6B1yM47EEH60AZXwj+Juja1o+j+GHM0Or29sLdYzGSkixp94MMgfKvfHNePfHG3eH4s6s7KQsyQOp9R5KL/NTXq3wu+EGq+B/F0+rale2FzD9leGEQM+8MzLyQVAAwCOp612Pj/wCHGk+P7OJbx3tr2DPkXcQyyg/wsP4l7449iMnIB518E/iZo9roWn+EdQ82C+WdorVhGXSXzHLAEjODliOeMY5rD/aK8Ny2viKy8RRRk215EIJWA+7KmcZ+q4x/uGt/wN8DtY8LeO9P1m81HT7mytGdysZcSElGC8FcdSD1r2y+sbbUrKazvYI7i2mQpJFIuVdT2IoA+b/g/wDFfT/CWnzaHr3mpYtKZbe4RC/lE43KQOccZGM8k+tJ8Yvivp3i3T4dD0AyvZCQSz3DoU8wgHaqg845ycgcgVq65+zdc/aXfQNbhMDHKxXylWT23IDn8hTdE/Zuuzco+v63AsAOWisVZmb23MBj8jQBR/Z28NzXXiW88QyRkWtlEYI2I4aV8ZwfZc5/3hXu/jPQR4n8G6ro5AMlzAwiz0Eg+ZD/AN9AVq2Fha6ZYw2Vjbx29tCuyOKNcKo9qsUAfA8U8sMcyRSMqTJskAPDruDYP4qD+Ar6g+B3heO1+GMsl5CCdaeR5FI5MONij6EBj/wKsPU/2b7e81S7ubTxJ9kt5pmkjt/sG/ylJyFz5gzjpnFe329tFaWkVtAgSGFBHGo6KoGAPyFAHwdvubI3NsS8RceVMnTIDBtp/wCBKD+Ar339m/w6Y7XVvEcycyMLO3JHYYZ/wJ2D/gJrV8W/AODxL4ov9Zg1/wCwreSea0H2LzNrEfMd28Zycnp3r1Hw7olv4b8OWGjWpzFaQrGGxjee7Y7EnJ/GgDzH9obxD/Z3g+10WJ8TalPlwP8AnlHgn/x4p+RrzP4D+Hf7Z+IKX8ibrfS4jOcjjzD8qD65JYf7tZPxf8QnxF8SNSlR91vZt9jg9AEyGx9WLH8a96+B3h3+w/hzbXMibbnU3N2+RzsPCD6bQG/4EaAPSa8g/aN/5J5Yf9hWP/0VLXr9cf8AEfwL/wALA8PQaV/aP2DybpbnzfI83OEdduNy4+/nOe1AHzj8Ev8Akruh/wDbf/0RJX19XkHgn4Gnwf4vsdfPiP7Z9l8z9x9i8vdujZPveYcY3Z6dq9foAK5/x3/yT3xL/wBgq6/9FNXQVn67pn9t+HtT0rzvJ+3Wktt5u3ds3oV3YyM4znGRQB8Y+Bf+SheGv+wr
a/8Ao1a+xPGUD3XgfxBbxgmSXTbhFA7kxsBXlWhfs8/2J4h03Vf+Eo8/7FdRXPlf2ft37HDbc+YcZxjODXt/86APg/RdRfR9d0/U0Te9ncx3AQnG4owbH6V9reGPFWk+L9I/tPR5nltg5jYvGyFXABIwRz94cjivIvE37OcNzeS3PhzVVtY3JItbpSyofQOOcexBPua9N+HXha48HeCbLRruSGS6iaR5XhJKMzOSMEgHoQOnagD4sHFfa/g3x3ovjmzmuNIkmL2+3z4poirRls4BPQ9D0J6V594y+AFjrWoTajoeoLp08zF5LeSPdEWPUrjlR3xzXVfCfwJe+AvDt3Y6jNazXVxdmbfbMxXZtUAfMoOcg/nQB8peIdDuvDniC+0i8Uia1laMnGAw7MPYjBH1r6S8P/Hvwnd6NFLrNxLYagqATQiB5FZu5QqDwffGP1rrvGngDRfHVgsGqRMk8QPkXUJxJHn3PUeoP6GvErn9m/xIs5FrrGkyxZ4aUyI2PoEb+dAHnfjjxTN408W3mtSRmJJSFhiJz5cajCj69z7k19TfCjw5L4Y+HenWdyhS6mBuZ0IwVZ+dp9wu0H6VzHgn4D6T4eu49R1q5/tW8jYNHHs2woR3IPLn64HtXrg6UAeB/tH+HS0OleI4k5QmznI9Dlk/Xf8AmK8GDXV89ragvMygQwR+mWJ2j6sxP1NfcHiXQoPE3hrUNFuW2x3cJj34zsbqrY74IBx7V5j4Q+AkPhjxTY61Prwv1tHMiwfYvLBbBCnPmHoSD07UAWvjr4Yju/htDcWcIB0Z0ZFUdISAhA/8dP0WvmCSeWaOFJJGZYU2Rg/wKWLYH4sx/GvvG8tIb6yuLO4QPBcRtFIp7qwwR+RrxTSP2crew1ezvLvxH9st4Jklkt/sGzzQpzt3eYcA9OlAHq3g3QV8MeD9L0cAB7aBVkx0Mh5c/ixJqt4/1ZNE8Aa5fs20paOiHP8AG42L/wCPMK6Wvnr9orxWs1zY+FraQHySLq7wejEEIp/Alv8AgS0AeUeA9LbWfHmh2KruEl5Gzjr8inc3/joNfW/jvxjbeB/DE2r3ELTuHWKGFW2+ZIc4GewwCSfQV5N+zr4TZft3im5jwrA2loT35zIw/ID/AL6r1nx34OtfHPhibR7iYwPvWWGYLu8uQZwcdxgkEehoA8y8BfHa717xLa6Pr2n2sIvJBHBPa7lCufuqysTnJ4zngkcV7lXhvgH4D3eg+JbbWNd1G1mFnIJYILTcwZx90szAYwecAdcc17lQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFGe1FGM0AfII+CfxCOP+Kf/wDJyD/4uvr6kxS0AFcV8U/DereLfBUukaO1us8k8bus7lQ6L82AQDzkL1x0rtaTAoA+SIvgb4/kmEb6PFEuceY95EVHvwxP6V738NvhlY+AbKSRpRd6tcACa524Cr12ID0Hqep/AAd7RQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAZr5B/4Ul8Q/8AoX//ACdt/wD45X16RmlxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABUN1JNFZzyW8JnnSNmjiDBfMYDhcngZPGTU1FAHyGvwR+ILMAdBC5PLG8g49+Hr66jRI41jjUKiAKqgYAA6ClxS0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXgHi74A3l74lW80PUhJZ3lxuuhdNmWDccs4P8Y69cHOOvJHv9GOc0AQWdpBYWcNpaRLFbwoI441GAqgYAFT0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFF
FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFF
FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFF
FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH/9k=' class BackgroundIndexFileGenerator: def __init__(self, dir_path): self.dir_path = dir_path self.thread = threading.Thread(target=self._process, args=()) self.thread.daemon = True def _process(self): _create_index_files(self.dir_path) def run(self): self.thread.start() def _clean_up(paths): """ Clean up after ourselves, removing created files. @param {[String]} A list of file paths specifying the files we've created during run. Will all be deleted. @return {None} """ print('Cleaning up') # Iterate over the given paths, unlinking them for path in paths: print('Removing %s' % path) os.unlink(path) def _create_index_file( root_dir, location, image_files, dirs, force_no_processing=False): """ Create an index file in the given location, supplying known lists of present image files and subdirectories. @param {String} root_dir - The root directory of the entire crawl. Used to ascertain whether the given location is the top level. @param {String} location - The current directory of the crawl. 
The index file will be created here. @param {[String]} image_files - A list of image file names in the location. These will be displayed in the index file's gallery. @param {[String]} dirs - The subdirectories of the location directory. These will be displayed as links further down the file structure. @param {Boolean=False} force_no_processing - If True, do not attempt to actually process thumbnails, PIL images or anything. Simply index <img> tags with original file src attributes. @return {String} The full path (location plus filename) of the newly created index file. Intended for usage cleaning up created files. """ # Put together HTML as a list of the lines we'll want to include # Issue #2 exists to do this better than HTML in-code header_text = \ 'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]' html = [ '<!DOCTYPE html>', '<html>', ' <head>', ' <title>imageMe</title>' ' <style>', ' html, body {margin: 0;padding: 0;}', ' .header {text-align: right;}', ' .content {', ' padding: 3em;', ' padding-left: 4em;', ' padding-right: 4em;', ' }', ' .image {max-width: 100%; border-radius: 0.3em;}', ' td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}', ' </style>', ' </head>', ' <body>', ' <div class="content">', ' <h2 class="header">' + header_text + '</h2>' ] # Populate the present subdirectories - this includes '..' 
unless we're at # the top level directories = [] if root_dir != location: directories = ['..'] directories += dirs if len(directories) > 0: html.append('<hr>') # For each subdirectory, include a link to its index file for directory in directories: link = directory + '/' + INDEX_FILE_NAME html += [ ' <h3 class="header">', ' <a href="' + link + '">' + directory + '</a>', ' </h3>' ] # Populate the image gallery table # Counter to cycle down through table rows table_row_count = 1 html += ['<hr>', '<table>'] # For each image file, potentially create a new <tr> and create a new <td> for image_file in image_files: if table_row_count == 1: html.append('<tr>') img_src = _get_thumbnail_src_from_file( location, image_file, force_no_processing ) link_target = _get_image_link_target_from_file( location, image_file, force_no_processing ) html += [ ' <td>', ' <a href="' + link_target + '">', ' <img class="image" src="' + img_src + '">', ' </a>', ' </td>' ] if table_row_count == IMAGES_PER_ROW: table_row_count = 0 html.append('</tr>') table_row_count += 1 html += ['</tr>', '</table>'] html += [ ' </div>', ' </body>', '</html>' ] # Actually create the file, now we've put together the HTML content index_file_path = _get_index_file_path(location) print('Creating index file %s' % index_file_path) index_file = open(index_file_path, 'w') index_file.write('\n'.join(html)) index_file.close() # Return the path for cleaning up later return index_file_path def _create_index_files(root_dir, force_no_processing=False): """ Crawl the root directory downwards, generating an index HTML file in each directory on the way down. @param {String} root_dir - The top level directory to crawl down from. In normal usage, this will be '.'. @param {Boolean=False} force_no_processing - If True, do not attempt to actually process thumbnails, PIL images or anything. Simply index <img> tags with original file src attributes. @return {[String]} Full file paths of all created files. 
""" # Initialise list of created file paths to build up as we make them created_files = [] # Walk the root dir downwards, creating index files as we go for here, dirs, files in os.walk(root_dir): print('Processing %s' % here) # Sort the subdirectories by name dirs = sorted(dirs) # Get image files - all files in the directory matching IMAGE_FILE_REGEX image_files = [f for f in files if re.match(IMAGE_FILE_REGEX, f)] # Sort the image files by name image_files = sorted(image_files) # Create this directory's index file and add its name to the created # files list created_files.append( _create_index_file( root_dir, here, image_files, dirs, force_no_processing ) ) # Return the list of created files return created_files def _get_image_from_file(dir_path, image_file): """ Get an instance of PIL.Image from the given file. @param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @return {PIL.Image} An instance of the image file as a PIL Image, or None if the functionality is not available. This could be because PIL is not present, or because it can't process the given file type. """ # Save ourselves the effort if PIL is not present, and return None now if not PIL_ENABLED: return None # Put together full path path = os.path.join(dir_path, image_file) # Try to read the image img = None try: img = Image.open(path) except IOError as exptn: print('Error loading image file %s: %s' % (path, exptn)) # Return image or None return img def _get_image_link_target_from_file(dir_path, image_file, force_no_processing=False): """ Get the value to be used as the href for links from thumbnail images. For most image formats this will simply be the image file name itself. However, some image formats (tif) are not natively displayable by many browsers and therefore we must link to image data in another format. 
@param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @param {Boolean=False} force_no_processing - If True, do not attempt to actually process a thumbnail, PIL image or anything. Simply return the image filename as src. @return {String} The href to use. """ # If we've specified to force no processing, just return the image filename if force_no_processing: return image_file # First try to get an image img = _get_image_from_file(dir_path, image_file) # If format is directly displayable in-browser, just return the filename # Else, we need to return a full-sized chunk of displayable image data if img.format.lower() in ['tif', 'tiff']: return _get_image_src_from_file( dir_path, image_file, force_no_processing ) return image_file def _get_image_src_from_file(dir_path, image_file, force_no_processing=False): """ Get base-64 encoded data as a string for the given image file's full image, for use directly in HTML <img> tags, or a path to the original if image scaling is not supported. This is a full-sized version of _get_thumbnail_src_from_file, for use in image formats which cannot be displayed directly in-browser, and therefore need processed versions even at full size. @param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @param {Boolean=False} force_no_processing - If True, do not attempt to actually process a thumbnail, PIL image or anything. Simply return the image filename as src. @return {String} The base-64 encoded image data string, or path to the file itself if not supported. 
""" # If we've specified to force no processing, just return the image filename if force_no_processing: if image_file.endswith('tif') or image_file.endswith('tiff'): return UNSUPPORTED_IMAGE_TYPE_DATA return image_file # First try to get an image img = _get_image_from_file(dir_path, image_file) return _get_src_from_image(img, image_file) def _get_index_file_path(location): """ Get the full file path to be used for an index file in the given location. Yields location plus the constant INDEX_FILE_NAME. @param {String} location - A directory location in which we want to create a new index file. @return {String} A file path for usage with a new index file. """ return os.path.join(location, INDEX_FILE_NAME) def _get_server_port(): """ Get the port specified for the server to run on. If given as the first command line argument, we'll use that. Else we'll default to 8000. @return {Integer} The port to run the server on. Default 8000, overridden by first command line argument. """ return int(sys.argv[1]) if len(sys.argv) >= 2 else 8000 def _get_src_from_image(img, fallback_image_file): """ Get base-64 encoded data as a string for the given image. Fallback to return fallback_image_file if cannot get the image data or img is None. @param {Image} img - The PIL Image to get src data for @param {String} fallback_image_file - The filename of the image file, to be used when image data capture fails @return {String} The base-64 encoded image data string, or path to the file itself if not supported. 
""" # If the image is None, then we can't process, so we should return the # path to the file itself if img is None: return fallback_image_file # Target format should be the same as the original image format, unless it's # a TIF/TIFF, which can't be displayed by most browsers; we convert these # to jpeg target_format = img.format if target_format.lower() in ['tif', 'tiff']: target_format = 'JPEG' # If we have an actual Image, great - put together the base64 image string try: bytesio = io.BytesIO() img.save(bytesio, target_format) byte_value = bytesio.getvalue() b64 = base64.b64encode(byte_value) return 'data:image/%s;base64,%s' % (target_format.lower(), b64) except IOError as exptn: print('IOError while saving image bytes: %s' % exptn) return fallback_image_file def _get_thumbnail_image_from_file(dir_path, image_file): """ Get a PIL.Image from the given image file which has been scaled down to THUMBNAIL_WIDTH wide. @param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @return {PIL.Image} An instance of the thumbnail as a PIL Image, or None if the functionality is not available. See _get_image_from_file for details. 
""" # Get image img = _get_image_from_file(dir_path, image_file) # If it's not supported, exit now if img is None: return None if img.format.lower() == 'gif': return None # Get image dimensions img_width, img_height = img.size # We need to perform a resize - first, work out the scale ratio to take the # image width to THUMBNAIL_WIDTH (THUMBNAIL_WIDTH:img_width ratio) scale_ratio = THUMBNAIL_WIDTH / float(img_width) # Work out target image height based on the scale ratio target_height = int(scale_ratio * img_height) # Perform the resize try: img.thumbnail((THUMBNAIL_WIDTH, target_height), resample=RESAMPLE) except IOError as exptn: print('WARNING: IOError when thumbnailing %s/%s: %s' % ( dir_path, image_file, exptn )) return None # Return the resized image return img def _get_thumbnail_src_from_file(dir_path, image_file, force_no_processing=False): """ Get base-64 encoded data as a string for the given image file's thumbnail, for use directly in HTML <img> tags, or a path to the original if image scaling is not supported. @param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @param {Boolean=False} force_no_processing - If True, do not attempt to actually process a thumbnail, PIL image or anything. Simply return the image filename as src. @return {String} The base-64 encoded image data string, or path to the file itself if not supported. """ # If we've specified to force no processing, just return the image filename if force_no_processing: if image_file.endswith('tif') or image_file.endswith('tiff'): return UNSUPPORTED_IMAGE_TYPE_DATA return image_file # First try to get a thumbnail image img = _get_thumbnail_image_from_file(dir_path, image_file) return _get_src_from_image(img, image_file) def _run_server(): """ Run the image server. This is blocking. Will handle user KeyboardInterrupt and other exceptions appropriately and return control once the server is stopped. 
@return {None} """ # Get the port to run on port = _get_server_port() # Configure allow_reuse_address to make re-runs of the script less painful - # if this is not True then waiting for the address to be freed after the # last run can block a subsequent run socketserver.TCPServer.allow_reuse_address = True # Create the server instance server = socketserver.TCPServer( ('', port), http.server.SimpleHTTPRequestHandler ) # Print out before actually running the server (cheeky / optimistic, however # you want to look at it) print('Your images are at http://127.0.0.1:%d/%s' % ( port, INDEX_FILE_NAME )) # Try to run the server try: # Run it - this call blocks until the server is killed server.serve_forever() except KeyboardInterrupt: # This is the expected way of the server being killed, since imageMe is # intended for ad-hoc running from command line print('User interrupted, stopping') except Exception as exptn: # Catch everything else - this will handle shutdowns via other signals # and faults actually starting the server in the first place print(exptn) print('Unhandled exception in server, stopping') def serve_dir(dir_path): """ Generate indexes and run server from the given directory downwards. 
@param {String} dir_path - The directory path (absolute, or relative to CWD) @return {None} """ # Create index files, and store the list of their paths for cleanup later # This time, force no processing - this gives us a fast first-pass in terms # of page generation, but potentially slow serving for large image files print('Performing first pass index file generation') created_files = _create_index_files(dir_path, True) if (PIL_ENABLED): # If PIL is enabled, we'd like to process the HTML indexes to include # generated thumbnails - this slows down generation so we don't do it # first time around, but now we're serving it's good to do in the # background print('Performing PIL-enchanced optimised index file generation in background') background_indexer = BackgroundIndexFileGenerator(dir_path) background_indexer.run() # Run the server in the current location - this blocks until it's stopped _run_server() # Clean up the index files created earlier so we don't make a mess of # the image directories _clean_up(created_files) if __name__ == '__main__': # Generate indices and serve from the current directory downwards when run # as the entry point serve_dir('.')
155.351738
57,015
0.881778
5,217
75,967
12.772666
0.52674
0.007699
0.005358
0.002881
0.056652
0.049433
0.046972
0.046102
0.043761
0.042965
0
0.109361
0.064857
75,967
488
57,016
155.670082
0.828632
0.131715
0
0.189815
1
0
0.893846
0.873368
0
1
0.000061
0
0
1
0.074074
false
0.00463
0.018519
0
0.199074
0.074074
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
1
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
7
166b0e9fcf52d892b054f980b0113351603fa15c
12,723
py
Python
tests/resampler_batch_test.py
tdml13/NiftyNet
b35fa19ca307e81d229e2fe8269a417724833da2
[ "Apache-2.0" ]
1,403
2017-08-30T11:49:45.000Z
2022-03-31T11:44:05.000Z
tests/resampler_batch_test.py
tdml13/NiftyNet
b35fa19ca307e81d229e2fe8269a417724833da2
[ "Apache-2.0" ]
360
2017-10-03T15:33:53.000Z
2021-03-17T06:27:38.000Z
tests/resampler_batch_test.py
tdml13/NiftyNet
b35fa19ca307e81d229e2fe8269a417724833da2
[ "Apache-2.0" ]
464
2017-09-13T20:56:32.000Z
2022-02-11T20:33:47.000Z
from __future__ import absolute_import, print_function, division import numpy as np import tensorflow as tf from niftynet.layer.resampler import ResamplerLayer from tests.niftynet_testcase import NiftyNetTestCase class ResamplerTest(NiftyNetTestCase): def test_shape_interface(self): test_input = tf.zeros((2, 10, 10, 10, 3)) test_coords = tf.zeros((3, 5, 5, 5, 3)) # bad batch sizes with self.assertRaisesRegexp(ValueError, ''): out = ResamplerLayer()(test_input, test_coords) test_input = tf.zeros((2, 10, 10, 10, 3)) test_coords = tf.zeros((5, 5, 5, 3)) # bad batch sizes with self.assertRaisesRegexp(ValueError, ''): out = ResamplerLayer()(test_input, test_coords) test_input = tf.zeros((1, 10, 10, 3)) test_coords = tf.zeros((1, 5, 5, 3)) # bad n coordinates with self.assertRaisesRegexp(ValueError, ''): out = ResamplerLayer()(test_input, test_coords) def test_linear_shape(self): # 3D test_input = np.zeros((2, 8, 8, 8, 2)) test_input[0, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 5, 3)) * 0.1 out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9**3, atol=1e-5))) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 8, 8, 2)) test_input[0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 2)) * 0.1 out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9**2, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 8, 2)) test_input[0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 1)) * 0.1 out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() 
as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) def test_linear_no_broadcasting(self): # 3D test_input = np.zeros((2, 8, 8, 8, 2)) test_input[:, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 5, 3)) * 0.1, tf.ones((1, 5, 5, 5, 3)) * 0.2], axis=0) out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9**3, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.8**3, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 8, 8, 2)) test_input[:, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 2)) * 0.1, tf.ones((1, 5, 5, 2)) * 0.2], axis=0) out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9**2, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.8**2, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 8, 2)) test_input[:, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 1)) * 0.1, tf.ones((1, 5, 1)) * 0.2], axis=0) out = ResamplerLayer("LINEAR")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 0.9, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.8, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) def test_nearest_shape(self): # 3D test_input = np.zeros((2, 8, 8, 8, 2)) test_input[0, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 5, 3)) * 0.1 out = 
ResamplerLayer("NEAREST")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 8, 8, 2)) test_input[0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 2)) * 0.1 out = ResamplerLayer("NEAREST")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 8, 2)) test_input[0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 1)) * 0.1 out = ResamplerLayer("NEAREST")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) def test_nearest_no_broadcasting(self): # 3D test_input = np.zeros((2, 3, 3, 3, 2)) test_input[:, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 5, 3)) * 0.1, tf.ones((1, 5, 5, 5, 3)) * 1.2], axis=0) out = ResamplerLayer("NEAREST")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 3, 3, 2)) test_input[:, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 2)) * 0.1, tf.ones((1, 5, 5, 2)) * 1.2], axis=0) out = ResamplerLayer("NEAREST")(test_input, test_coords) with 
self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 3, 2)) test_input[:, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 1)) * 0.1, tf.ones((1, 5, 1)) * 1.2], axis=0) out = ResamplerLayer("NEAREST")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0, atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) def test_idw_shape(self): # 3D test_input = np.zeros((2, 8, 8, 8, 2)) test_input[0, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 5, 3)) * 0.1 out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0/(1. + 9./83 + 9./163 + 3./243), atol=1e-5))) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 8, 8, 2)) test_input[0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 5, 2)) * 0.1 out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1./(2./41. 
+ 1./81.0 + 1.0), atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 8, 2)) test_input[0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.ones((1, 5, 1)) * 0.1 out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue(np.all(out_value[1, ...]==0)) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 100.0/(100.0+1/0.81), atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) def test_idw_no_broadcasting(self): # 3D test_input = np.zeros((2, 3, 3, 3, 2)) test_input[:, 0, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 5, 3)) * 0.2, tf.ones((1, 5, 5, 5, 3)) * 1.2], axis=0) out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0/(1. + 1./2. + 36./132. + 12./192.), atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 5, 2)) # 2D test_input = np.zeros((2, 3, 3, 2)) test_input[:, 0, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 5, 2)) * 0.2, tf.ones((1, 5, 5, 2)) * 1.2], axis=0) out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( np.all(np.isclose(out_value[0, ..., 0], 1.0/(1.0 + 1.0/16.0 + 16./68.), atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 5, 2)) # 1D test_input = np.zeros((2, 3, 2)) test_input[:, 0, 0] = 1.0 test_input = tf.constant(test_input) test_coords = tf.concat([tf.ones((1, 5, 1)) * 0.2, tf.ones((1, 5, 1)) * 1.2], axis=0) out = ResamplerLayer("IDW")(test_input, test_coords) with self.cached_session() as sess: out_value = sess.run(out) self.assertTrue( 
np.all(np.isclose(out_value[0, ..., 0], 1.0/(1.0 + 1/16.0), atol=1e-5))) self.assertTrue( np.all(np.isclose(out_value[1, ..., 0], 0.0, atol=1e-5))) self.assertEqual(out_value.shape, (2, 5, 2)) if __name__ == "__main__": tf.test.main()
43.571918
76
0.516545
1,865
12,723
3.38445
0.04504
0.136882
0.080323
0.117395
0.944867
0.94455
0.94455
0.940906
0.940748
0.938371
0
0.081992
0.316513
12,723
291
77
43.721649
0.643859
0.008096
0
0.863454
0
0
0.008255
0
0
0
0
0
0.228916
1
0.028112
false
0
0.02008
0
0.052209
0.004016
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
167b34c312d1a59bc27b2a2321cd692d9e8b7a5c
214,271
py
Python
msgraph-cli-extensions/v1_0/planner_v1_0/azext_planner_v1_0/generated/custom.py
thewahome/msgraph-cli
33127d9efa23a0e5f5303c93242fbdbb73348671
[ "MIT" ]
null
null
null
msgraph-cli-extensions/v1_0/planner_v1_0/azext_planner_v1_0/generated/custom.py
thewahome/msgraph-cli
33127d9efa23a0e5f5303c93242fbdbb73348671
[ "MIT" ]
null
null
null
msgraph-cli-extensions/v1_0/planner_v1_0/azext_planner_v1_0/generated/custom.py
thewahome/msgraph-cli
33127d9efa23a0e5f5303c93242fbdbb73348671
[ "MIT" ]
null
null
null
# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # pylint: disable=too-many-lines def planner_group_delete_planner(client, group_id, if_match=None): return client.delete_planner(group_id=group_id, if_match=if_match) def planner_group_show_planner(client, group_id, select=None, expand=None): return client.get_planner(group_id=group_id, select=select, expand=expand) def planner_group_update_planner(client, group_id, id_=None, plans=None): body = {} body['id'] = id_ body['plans'] = plans return client.update_planner(group_id=group_id, body=body) def planner_group_planner_create_plan(client, group_id, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return client.create_plans(group_id=group_id, body=body) def planner_group_planner_delete_plan(client, group_id, planner_plan_id, if_match=None): return client.delete_plans(group_id=group_id, planner_plan_id=planner_plan_id, if_match=if_match) def planner_group_planner_list_plan(client, 
group_id, orderby=None, select=None, expand=None): return client.list_plans(group_id=group_id, orderby=orderby, select=select, expand=expand) def planner_group_planner_show_plan(client, group_id, planner_plan_id, select=None, expand=None): return client.get_plans(group_id=group_id, planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_group_planner_update_plan(client, group_id, planner_plan_id, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return client.update_plans(group_id=group_id, planner_plan_id=planner_plan_id, body=body) def planner_group_planner_plan_create_bucket(client, group_id, planner_plan_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.create_buckets(group_id=group_id, planner_plan_id=planner_plan_id, body=body) def planner_group_planner_plan_create_task(client, group_id, planner_plan_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, 
reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application 
body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(group_id=group_id, planner_plan_id=planner_plan_id, body=body) def planner_group_planner_plan_delete_bucket(client, group_id, planner_plan_id, planner_bucket_id, if_match=None): return client.delete_buckets(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, if_match=if_match) def planner_group_planner_plan_delete_detail(client, group_id, planner_plan_id, if_match=None): return client.delete_details(group_id=group_id, planner_plan_id=planner_plan_id, if_match=if_match) def planner_group_planner_plan_delete_task(client, group_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_list_bucket(client, group_id, planner_plan_id, orderby=None, select=None, expand=None): return client.list_buckets(group_id=group_id, planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def planner_group_planner_plan_list_task(client, group_id, planner_plan_id, orderby=None, select=None, expand=None): return client.list_tasks(group_id=group_id, planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def planner_group_planner_plan_show_bucket(client, group_id, planner_plan_id, planner_bucket_id, select=None, expand=None): return client.get_buckets(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, select=select, expand=expand) def planner_group_planner_plan_show_detail(client, group_id, planner_plan_id, select=None, expand=None): return client.get_details(group_id=group_id, 
planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_group_planner_plan_show_task(client, group_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_update_bucket(client, group_id, planner_plan_id, planner_bucket_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.update_buckets(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_group_planner_plan_update_detail(client, group_id, planner_plan_id, id_=None, category_descriptions=None, shared_with=None): body = {} body['id'] = id_ body['category_descriptions'] = category_descriptions body['shared_with'] = shared_with return client.update_details(group_id=group_id, planner_plan_id=planner_plan_id, body=body) def planner_group_planner_plan_update_task(client, group_id, planner_plan_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, 
microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def 
planner_group_planner_plan_bucket_create_task(client, group_id, planner_plan_id, planner_bucket_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description 
body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_group_planner_plan_bucket_delete_task(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_bucket_list_task(client, group_id, planner_plan_id, planner_bucket_id, orderby=None, select=None, expand=None): return client.list_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, orderby=orderby, select=select, expand=expand) def planner_group_planner_plan_bucket_show_task(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_bucket_update_task(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, active_checklist_item_count=None, 
applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} 
body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_bucket_task_delete_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_bucket_task_delete_bucket_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_bucket_task_delete_detail(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_bucket_task_delete_progress_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return 
client.delete_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_bucket_task_show_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_bucket_task_show_bucket_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_bucket_task_show_detail(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_bucket_task_show_progress_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_bucket_task_update_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return 
client.update_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_bucket_task_update_bucket_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_bucket_task_update_detail(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_bucket_task_update_progress_task_board_format(client, group_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_task_delete_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_task_delete_bucket_task_board_format(client, group_id, planner_plan_id, planner_task_id, if_match=None): return 
client.delete_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_task_delete_detail(client, group_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_task_delete_progress_task_board_format(client, group_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_group_planner_plan_task_show_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_task_show_bucket_task_board_format(client, group_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_task_show_detail(client, group_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_task_show_progress_task_board_format(client, group_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_group_planner_plan_task_update_assigned_to_task_board_format(client, group_id, planner_plan_id, planner_task_id, 
id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_task_update_bucket_task_board_format(client, group_id, planner_plan_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_task_update_detail(client, group_id, planner_plan_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_group_planner_plan_task_update_progress_task_board_format(client, group_id, planner_plan_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(group_id=group_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_update(client, id_=None, buckets=None, plans=None, tasks=None): body = {} body['id'] = id_ body['buckets'] = buckets body['plans'] = plans body['tasks'] = tasks return client.update_planner(body=body) def planner_planner_show_planner(client, select=None, expand=None): return client.get_planner(select=select, expand=expand) def planner_planner_create_bucket(client, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = 
id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.create_buckets(body=body) def planner_planner_create_plan(client, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return client.create_plans(body=body) def planner_planner_create_task(client, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments 
body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(body=body) def planner_planner_delete_bucket(client, planner_bucket_id, if_match=None): return client.delete_buckets(planner_bucket_id=planner_bucket_id, if_match=if_match) def planner_planner_delete_plan(client, planner_plan_id, if_match=None): return client.delete_plans(planner_plan_id=planner_plan_id, if_match=if_match) def planner_planner_delete_task(client, 
planner_task_id, if_match=None): return client.delete_tasks(planner_task_id=planner_task_id, if_match=if_match) def planner_planner_list_bucket(client, orderby=None, select=None, expand=None): return client.list_buckets(orderby=orderby, select=select, expand=expand) def planner_planner_list_plan(client, orderby=None, select=None, expand=None): return client.list_plans(orderby=orderby, select=select, expand=expand) def planner_planner_list_task(client, orderby=None, select=None, expand=None): return client.list_tasks(orderby=orderby, select=select, expand=expand) def planner_planner_show_bucket(client, planner_bucket_id, select=None, expand=None): return client.get_buckets(planner_bucket_id=planner_bucket_id, select=select, expand=expand) def planner_planner_show_plan(client, planner_plan_id, select=None, expand=None): return client.get_plans(planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_planner_show_task(client, planner_task_id, select=None, expand=None): return client.get_tasks(planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_update_bucket(client, planner_bucket_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.update_buckets(planner_bucket_id=planner_bucket_id, body=body) def planner_planner_update_plan(client, planner_plan_id, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions 
body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return client.update_plans(planner_plan_id=planner_plan_id, body=body) def planner_planner_update_task(client, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format 
body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(planner_task_id=planner_task_id, body=body) def planner_planner_bucket_create_task(client, planner_bucket_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] 
= active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(planner_bucket_id=planner_bucket_id, body=body) def planner_planner_bucket_delete_task(client, planner_bucket_id, planner_task_id, if_match=None): return client.delete_tasks(planner_bucket_id=planner_bucket_id, 
planner_task_id=planner_task_id, if_match=if_match) def planner_planner_bucket_list_task(client, planner_bucket_id, orderby=None, select=None, expand=None): return client.list_tasks(planner_bucket_id=planner_bucket_id, orderby=orderby, select=select, expand=expand) def planner_planner_bucket_show_task(client, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_tasks(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_bucket_update_task(client, planner_bucket_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = 
percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_bucket_task_delete_assigned_to_task_board_format(client, planner_bucket_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_bucket_task_delete_bucket_task_board_format(client, planner_bucket_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_bucket_task_delete_detail(client, planner_bucket_id, planner_task_id, if_match=None): return 
client.delete_details(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_bucket_task_delete_progress_task_board_format(client, planner_bucket_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_bucket_task_show_assigned_to_task_board_format(client, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_bucket_task_show_bucket_task_board_format(client, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_bucket_task_show_detail(client, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_details(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_bucket_task_show_progress_task_board_format(client, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_bucket_task_update_assigned_to_task_board_format(client, planner_bucket_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_bucket_task_update_bucket_task_board_format(client, 
planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_bucket_task_update_detail(client, planner_bucket_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_bucket_task_update_progress_task_board_format(client, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_create_bucket(client, planner_plan_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.create_buckets(planner_plan_id=planner_plan_id, body=body) def planner_planner_plan_create_task(client, planner_plan_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, 
references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device 
body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(planner_plan_id=planner_plan_id, body=body) def planner_planner_plan_delete_bucket(client, planner_plan_id, planner_bucket_id, if_match=None): return client.delete_buckets(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, if_match=if_match) def planner_planner_plan_delete_detail(client, planner_plan_id, if_match=None): return client.delete_details(planner_plan_id=planner_plan_id, if_match=if_match) def planner_planner_plan_delete_task(client, planner_plan_id, planner_task_id, if_match=None): return client.delete_tasks(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_list_bucket(client, planner_plan_id, orderby=None, select=None, expand=None): return client.list_buckets(planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def planner_planner_plan_list_task(client, planner_plan_id, orderby=None, select=None, expand=None): return client.list_tasks(planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def planner_planner_plan_show_bucket(client, planner_plan_id, planner_bucket_id, select=None, expand=None): return client.get_buckets(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, select=select, expand=expand) def planner_planner_plan_show_detail(client, planner_plan_id, select=None, expand=None): return client.get_details(planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_planner_plan_show_task(client, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_tasks(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_update_bucket(client, planner_plan_id, planner_bucket_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint 
body['plan_id'] = plan_id body['tasks'] = tasks return client.update_buckets(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_planner_plan_update_detail(client, planner_plan_id, id_=None, category_descriptions=None, shared_with=None): body = {} body['id'] = id_ body['category_descriptions'] = category_descriptions body['shared_with'] = shared_with return client.update_details(planner_plan_id=planner_plan_id, body=body) def planner_planner_plan_update_task(client, planner_plan_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = 
plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_bucket_create_task(client, planner_plan_id, planner_bucket_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, 
order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device 
body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_planner_plan_bucket_delete_task(client, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_tasks(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_bucket_list_task(client, planner_plan_id, planner_bucket_id, orderby=None, select=None, expand=None): return client.list_tasks(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, orderby=orderby, select=select, expand=expand) def planner_planner_plan_bucket_show_task(client, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_tasks(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_bucket_update_task(client, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = 
active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_bucket_task_delete_assigned_to_task_board_format(client, planner_plan_id, 
planner_bucket_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_bucket_task_delete_bucket_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_bucket_task_delete_detail(client, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_details(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_bucket_task_delete_progress_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_bucket_task_show_assigned_to_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_bucket_task_show_bucket_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_bucket_task_show_detail(client, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return 
client.get_details(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_bucket_task_show_progress_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_bucket_task_update_assigned_to_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_bucket_task_update_bucket_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_bucket_task_update_detail(client, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_bucket_task_update_progress_task_board_format(client, planner_plan_id, planner_bucket_id, planner_task_id, 
id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_task_delete_assigned_to_task_board_format(client, planner_plan_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_task_delete_bucket_task_board_format(client, planner_plan_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_task_delete_detail(client, planner_plan_id, planner_task_id, if_match=None): return client.delete_details(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_task_delete_progress_task_board_format(client, planner_plan_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_planner_plan_task_show_assigned_to_task_board_format(client, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_task_show_bucket_task_board_format(client, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_task_show_detail(client, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_details(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, 
expand=expand) def planner_planner_plan_task_show_progress_task_board_format(client, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_plan_task_update_assigned_to_task_board_format(client, planner_plan_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_task_update_bucket_task_board_format(client, planner_plan_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_task_update_detail(client, planner_plan_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_plan_task_update_progress_task_board_format(client, planner_plan_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_planner_task_delete_assigned_to_task_board_format(client, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(planner_task_id=planner_task_id, 
if_match=if_match) def planner_planner_task_delete_bucket_task_board_format(client, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(planner_task_id=planner_task_id, if_match=if_match) def planner_planner_task_delete_detail(client, planner_task_id, if_match=None): return client.delete_details(planner_task_id=planner_task_id, if_match=if_match) def planner_planner_task_delete_progress_task_board_format(client, planner_task_id, if_match=None): return client.delete_progress_task_board_format(planner_task_id=planner_task_id, if_match=if_match) def planner_planner_task_show_assigned_to_task_board_format(client, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_task_show_bucket_task_board_format(client, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_task_show_detail(client, planner_task_id, select=None, expand=None): return client.get_details(planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_task_show_progress_task_board_format(client, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(planner_task_id=planner_task_id, select=select, expand=expand) def planner_planner_task_update_assigned_to_task_board_format(client, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(planner_task_id=planner_task_id, body=body) def planner_planner_task_update_bucket_task_board_format(client, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return 
client.update_bucket_task_board_format(planner_task_id=planner_task_id, body=body) def planner_planner_task_update_detail(client, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(planner_task_id=planner_task_id, body=body) def planner_planner_task_update_progress_task_board_format(client, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(planner_task_id=planner_task_id, body=body) def planner_user_delete_planner(client, user_id, if_match=None): return client.delete_planner(user_id=user_id, if_match=if_match) def planner_user_show_planner(client, user_id, select=None, expand=None): return client.get_planner(user_id=user_id, select=select, expand=expand) def planner_user_update_planner(client, user_id, id_=None, plans=None, tasks=None): body = {} body['id'] = id_ body['plans'] = plans body['tasks'] = tasks return client.update_planner(user_id=user_id, body=body) def planner_user_planner_create_plan(client, user_id, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return 
client.create_plans(user_id=user_id, body=body) def planner_user_planner_create_task(client, user_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description 
body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(user_id=user_id, body=body) def planner_user_planner_delete_plan(client, user_id, planner_plan_id, if_match=None): return client.delete_plans(user_id=user_id, planner_plan_id=planner_plan_id, if_match=if_match) def planner_user_planner_delete_task(client, user_id, planner_task_id, if_match=None): return client.delete_tasks(user_id=user_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_list_plan(client, user_id, orderby=None, select=None, expand=None): return client.list_plans(user_id=user_id, orderby=orderby, select=select, expand=expand) def planner_user_planner_list_task(client, user_id, orderby=None, select=None, expand=None): return client.list_tasks(user_id=user_id, orderby=orderby, select=select, expand=expand) def planner_user_planner_show_plan(client, user_id, planner_plan_id, select=None, expand=None): return client.get_plans(user_id=user_id, planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_user_planner_show_task(client, user_id, planner_task_id, select=None, expand=None): return client.get_tasks(user_id=user_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_update_plan(client, user_id, 
planner_plan_id, id_=None, created_date_time=None, owner=None, title=None, buckets=None, tasks=None, microsoft_graph_entity_id=None, category_descriptions=None, shared_with=None, application=None, device=None, user=None): body = {} body['id'] = id_ body['created_date_time'] = created_date_time body['owner'] = owner body['title'] = title body['buckets'] = buckets body['tasks'] = tasks body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['category_descriptions'] = category_descriptions body['details']['shared_with'] = shared_with body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user return client.update_plans(user_id=user_id, planner_plan_id=planner_plan_id, body=body) def planner_user_planner_update_task(client, user_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count 
body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(user_id=user_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_create_bucket(client, user_id, planner_plan_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.create_buckets(user_id=user_id, planner_plan_id=planner_plan_id, body=body) def 
planner_user_planner_plan_create_task(client, user_id, planner_plan_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = 
microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(user_id=user_id, planner_plan_id=planner_plan_id, body=body) def planner_user_planner_plan_delete_bucket(client, user_id, planner_plan_id, planner_bucket_id, if_match=None): return client.delete_buckets(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, if_match=if_match) def planner_user_planner_plan_delete_detail(client, user_id, planner_plan_id, if_match=None): return client.delete_details(user_id=user_id, planner_plan_id=planner_plan_id, if_match=if_match) def planner_user_planner_plan_delete_task(client, user_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_list_bucket(client, user_id, planner_plan_id, orderby=None, select=None, expand=None): return client.list_buckets(user_id=user_id, planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def planner_user_planner_plan_list_task(client, user_id, planner_plan_id, orderby=None, select=None, expand=None): return client.list_tasks(user_id=user_id, planner_plan_id=planner_plan_id, orderby=orderby, select=select, expand=expand) def 
planner_user_planner_plan_show_bucket(client, user_id, planner_plan_id, planner_bucket_id, select=None, expand=None): return client.get_buckets(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, select=select, expand=expand) def planner_user_planner_plan_show_detail(client, user_id, planner_plan_id, select=None, expand=None): return client.get_details(user_id=user_id, planner_plan_id=planner_plan_id, select=select, expand=expand) def planner_user_planner_plan_show_task(client, user_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_update_bucket(client, user_id, planner_plan_id, planner_bucket_id, id_=None, name=None, order_hint=None, plan_id=None, tasks=None): body = {} body['id'] = id_ body['name'] = name body['order_hint'] = order_hint body['plan_id'] = plan_id body['tasks'] = tasks return client.update_buckets(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_user_planner_plan_update_detail(client, user_id, planner_plan_id, id_=None, category_descriptions=None, shared_with=None): body = {} body['id'] = id_ body['category_descriptions'] = category_descriptions body['shared_with'] = shared_with return client.update_details(user_id=user_id, planner_plan_id=planner_plan_id, body=body) def planner_user_planner_plan_update_task(client, user_id, planner_plan_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, 
bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] 
= user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_bucket_create_task(client, user_id, planner_plan_id, planner_bucket_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count 
body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.create_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, body=body) def planner_user_planner_plan_bucket_delete_task(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_bucket_list_task(client, user_id, planner_plan_id, planner_bucket_id, orderby=None, select=None, expand=None): return client.list_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, orderby=orderby, select=select, expand=expand) def planner_user_planner_plan_bucket_show_task(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return 
client.get_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_bucket_update_task(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, active_checklist_item_count=None, applied_categories=None, assignee_priority=None, assignments=None, bucket_id=None, checklist_item_count=None, completed_date_time=None, conversation_thread_id=None, created_date_time=None, due_date_time=None, has_description=None, order_hint=None, percent_complete=None, plan_id=None, preview_type=None, reference_count=None, start_date_time=None, title=None, bucket_task_board_format=None, progress_task_board_format=None, microsoft_graph_entity_id=None, checklist=None, description=None, microsoft_graph_planner_preview_type=None, references=None, id1=None, order_hints_by_assignee=None, unassigned_order_hint=None, application=None, device=None, user=None, microsoft_graph_identity_application=None, microsoft_graph_identity_device=None, microsoft_graph_identity_user=None): body = {} body['id'] = id_ body['active_checklist_item_count'] = active_checklist_item_count body['applied_categories'] = applied_categories body['assignee_priority'] = assignee_priority body['assignments'] = assignments body['bucket_id'] = bucket_id body['checklist_item_count'] = checklist_item_count body['completed_date_time'] = completed_date_time body['conversation_thread_id'] = conversation_thread_id body['created_date_time'] = created_date_time body['due_date_time'] = due_date_time body['has_description'] = has_description body['order_hint'] = order_hint body['percent_complete'] = percent_complete body['plan_id'] = plan_id body['preview_type'] = preview_type body['reference_count'] = reference_count body['start_date_time'] = start_date_time body['title'] = title body['bucket_task_board_format'] = bucket_task_board_format body['progress_task_board_format'] = 
progress_task_board_format body['details'] = {} body['details']['id'] = microsoft_graph_entity_id body['details']['checklist'] = checklist body['details']['description'] = description body['details']['preview_type'] = microsoft_graph_planner_preview_type body['details']['references'] = references body['assigned_to_task_board_format'] = {} body['assigned_to_task_board_format']['id'] = id1 body['assigned_to_task_board_format']['order_hints_by_assignee'] = order_hints_by_assignee body['assigned_to_task_board_format']['unassigned_order_hint'] = unassigned_order_hint body['created_by'] = {} body['created_by']['application'] = application body['created_by']['device'] = device body['created_by']['user'] = user body['completed_by'] = {} body['completed_by']['application'] = microsoft_graph_identity_application body['completed_by']['device'] = microsoft_graph_identity_device body['completed_by']['user'] = microsoft_graph_identity_user return client.update_tasks(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_bucket_task_delete_assigned_to_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_bucket_task_delete_bucket_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_bucket_task_delete_detail(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_details(user_id=user_id, 
planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_bucket_task_delete_progress_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_bucket_task_show_assigned_to_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_bucket_task_show_bucket_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_bucket_task_show_detail(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_details(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_bucket_task_show_progress_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_bucket_task_update_assigned_to_task_board_format(client, user_id, 
planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None): body = {} body['id'] = id_ body['order_hints_by_assignee'] = order_hints_by_assignee body['unassigned_order_hint'] = unassigned_order_hint return client.update_assigned_to_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_bucket_task_update_bucket_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_bucket_task_update_detail(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None): body = {} body['id'] = id_ body['checklist'] = checklist body['description'] = description body['preview_type'] = preview_type body['references'] = references return client.update_details(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_bucket_task_update_progress_task_board_format(client, user_id, planner_plan_id, planner_bucket_id, planner_task_id, id_=None, order_hint=None): body = {} body['id'] = id_ body['order_hint'] = order_hint return client.update_progress_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_bucket_id=planner_bucket_id, planner_task_id=planner_task_id, body=body) def planner_user_planner_plan_task_delete_assigned_to_task_board_format(client, user_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_assigned_to_task_board_format(user_id=user_id, 
planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_task_delete_bucket_task_board_format(client, user_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_task_delete_detail(client, user_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_details(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_task_delete_progress_task_board_format(client, user_id, planner_plan_id, planner_task_id, if_match=None): return client.delete_progress_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, if_match=if_match) def planner_user_planner_plan_task_show_assigned_to_task_board_format(client, user_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_assigned_to_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_task_show_bucket_task_board_format(client, user_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_task_show_detail(client, user_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_details(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand) def planner_user_planner_plan_task_show_progress_task_board_format(client, user_id, planner_plan_id, planner_task_id, select=None, expand=None): return client.get_progress_task_board_format(user_id=user_id, 
planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, select=select, expand=expand)


def planner_user_planner_plan_task_update_assigned_to_task_board_format(client, user_id, planner_plan_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None):
    """Update the assignedToTaskBoardFormat of a user's plan task.

    Builds the request body from the optional fields (fields left as None
    are still sent as None) and delegates to the generated SDK client.
    """
    body = {}
    body['id'] = id_
    body['order_hints_by_assignee'] = order_hints_by_assignee
    body['unassigned_order_hint'] = unassigned_order_hint
    return client.update_assigned_to_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_plan_task_update_bucket_task_board_format(client, user_id, planner_plan_id, planner_task_id, id_=None, order_hint=None):
    """Update the bucketTaskBoardFormat of a user's plan task."""
    body = {}
    body['id'] = id_
    body['order_hint'] = order_hint
    return client.update_bucket_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_plan_task_update_detail(client, user_id, planner_plan_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None):
    """Update the details (checklist, description, preview, references) of a user's plan task."""
    body = {}
    body['id'] = id_
    body['checklist'] = checklist
    body['description'] = description
    body['preview_type'] = preview_type
    body['references'] = references
    return client.update_details(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_plan_task_update_progress_task_board_format(client, user_id, planner_plan_id, planner_task_id, id_=None, order_hint=None):
    """Update the progressTaskBoardFormat of a user's plan task."""
    body = {}
    body['id'] = id_
    body['order_hint'] = order_hint
    return client.update_progress_task_board_format(user_id=user_id, planner_plan_id=planner_plan_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_task_delete_assigned_to_task_board_format(client, user_id, planner_task_id, if_match=None):
    """Delete the assignedToTaskBoardFormat of a user's task (if_match is the ETag)."""
    return client.delete_assigned_to_task_board_format(user_id=user_id, planner_task_id=planner_task_id, if_match=if_match)


def planner_user_planner_task_delete_bucket_task_board_format(client, user_id, planner_task_id, if_match=None):
    """Delete the bucketTaskBoardFormat of a user's task (if_match is the ETag)."""
    return client.delete_bucket_task_board_format(user_id=user_id, planner_task_id=planner_task_id, if_match=if_match)


def planner_user_planner_task_delete_detail(client, user_id, planner_task_id, if_match=None):
    """Delete the details of a user's task (if_match is the ETag)."""
    return client.delete_details(user_id=user_id, planner_task_id=planner_task_id, if_match=if_match)


def planner_user_planner_task_delete_progress_task_board_format(client, user_id, planner_task_id, if_match=None):
    """Delete the progressTaskBoardFormat of a user's task (if_match is the ETag)."""
    return client.delete_progress_task_board_format(user_id=user_id, planner_task_id=planner_task_id, if_match=if_match)


def planner_user_planner_task_show_assigned_to_task_board_format(client, user_id, planner_task_id, select=None, expand=None):
    """Get the assignedToTaskBoardFormat of a user's task (select/expand are OData options)."""
    return client.get_assigned_to_task_board_format(user_id=user_id, planner_task_id=planner_task_id, select=select, expand=expand)


def planner_user_planner_task_show_bucket_task_board_format(client, user_id, planner_task_id, select=None, expand=None):
    """Get the bucketTaskBoardFormat of a user's task (select/expand are OData options)."""
    return client.get_bucket_task_board_format(user_id=user_id, planner_task_id=planner_task_id, select=select, expand=expand)


def planner_user_planner_task_show_detail(client, user_id, planner_task_id, select=None, expand=None):
    """Get the details of a user's task (select/expand are OData options)."""
    return client.get_details(user_id=user_id, planner_task_id=planner_task_id, select=select, expand=expand)


def planner_user_planner_task_show_progress_task_board_format(client, user_id, planner_task_id, select=None, expand=None):
    """Get the progressTaskBoardFormat of a user's task (select/expand are OData options)."""
    return client.get_progress_task_board_format(user_id=user_id, planner_task_id=planner_task_id, select=select, expand=expand)


def planner_user_planner_task_update_assigned_to_task_board_format(client, user_id, planner_task_id, id_=None, order_hints_by_assignee=None, unassigned_order_hint=None):
    """Update the assignedToTaskBoardFormat of a user's task."""
    body = {}
    body['id'] = id_
    body['order_hints_by_assignee'] = order_hints_by_assignee
    body['unassigned_order_hint'] = unassigned_order_hint
    return client.update_assigned_to_task_board_format(user_id=user_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_task_update_bucket_task_board_format(client, user_id, planner_task_id, id_=None, order_hint=None):
    """Update the bucketTaskBoardFormat of a user's task."""
    body = {}
    body['id'] = id_
    body['order_hint'] = order_hint
    return client.update_bucket_task_board_format(user_id=user_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_task_update_detail(client, user_id, planner_task_id, id_=None, checklist=None, description=None, preview_type=None, references=None):
    """Update the details (checklist, description, preview, references) of a user's task."""
    body = {}
    body['id'] = id_
    body['checklist'] = checklist
    body['description'] = description
    body['preview_type'] = preview_type
    body['references'] = references
    return client.update_details(user_id=user_id, planner_task_id=planner_task_id, body=body)


def planner_user_planner_task_update_progress_task_board_format(client, user_id, planner_task_id, id_=None, order_hint=None):
    """Update the progressTaskBoardFormat of a user's task."""
    body = {}
    body['id'] = id_
    body['order_hint'] = order_hint
    return client.update_progress_task_board_format(user_id=user_id, planner_task_id=planner_task_id, body=body)
53.972544
110
0.422288
16,306
214,271
5.066356
0.007114
0.096959
0.063732
0.068089
0.992967
0.98873
0.986382
0.984457
0.976468
0.972316
0
0.000352
0.523253
214,271
3,969
111
53.986143
0.808356
0.002193
0
0.920259
0
0
0.071194
0.025007
0
0
0
0
0
1
0.057763
false
0
0
0.036912
0.115526
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
1682a7be2337c1b4eb7b2b00ef83ea6214262534
6,221
py
Python
tensorflow-keras-mnist-demonstration.py
Xiaoyu-Xing/learning-machine-learning
92fc86a3b18b34d7d91e24d3e1693f27611cb08e
[ "MIT" ]
2
2019-02-16T21:41:30.000Z
2019-02-17T17:43:42.000Z
tensorflow-keras-mnist-demonstration.py
Xiaoyu-Xing/learning-machine-learning
92fc86a3b18b34d7d91e24d3e1693f27611cb08e
[ "MIT" ]
null
null
null
tensorflow-keras-mnist-demonstration.py
Xiaoyu-Xing/learning-machine-learning
92fc86a3b18b34d7d91e24d3e1693f27611cb08e
[ "MIT" ]
null
null
null
import tensorflow as tf
import time

mnist = tf.keras.datasets.mnist
# Train 60000, test 10000
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel values into [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0


def _run_experiment(hidden_units, optimizer, epochs, regularizer=None):
    """Build, train and evaluate one MLP on MNIST and print the result.

    Args:
        hidden_units: iterable of hidden-layer widths; each becomes a ReLU
            Dense layer.  An empty iterable gives a softmax-only model.
        optimizer: a tf.keras optimizer instance.
        epochs: number of training epochs (batch size fixed at 64).
        regularizer: optional kernel regularizer applied to every hidden layer.

    Prints the test-set [loss, accuracy] and the wall-clock time taken.
    """
    start = time.time()
    layers = [tf.keras.layers.Flatten()]
    for units in hidden_units:
        layers.append(tf.keras.layers.Dense(units, activation=tf.nn.relu,
                                            kernel_regularizer=regularizer))
    layers.append(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    model = tf.keras.models.Sequential(layers)
    # BUG FIX: validation_split is a Model.fit() argument; the original code
    # passed it to compile(), where it is not a valid keyword and was never
    # applied.  It now actually holds out 10% of the training data.
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=64, epochs=epochs,
              validation_split=0.1)
    result = model.evaluate(x_test, y_test)
    print(result, time.time() - start)


def test1():
    """Naive NN with 3 layers, SGD 0.01 lr, cross entropy loss."""
    _run_experiment([512, 128], tf.keras.optimizers.SGD(lr=0.01), epochs=20)


def test2():
    """NN with 3 layers, Adam 0.01 lr, cross entropy loss."""
    _run_experiment([512, 128], tf.keras.optimizers.Adam(lr=0.01), epochs=2)


def test3():
    """Compare to test1, config changed to: lr = 0.1, epochs = 5."""
    _run_experiment([512, 128], tf.keras.optimizers.SGD(lr=0.1), epochs=5)


def test4():
    """Reduce layers: only one hidden layer."""
    _run_experiment([128], tf.keras.optimizers.SGD(lr=0.1), epochs=5)


def test5():
    """Change hidden layer to 8 nodes."""
    _run_experiment([8], tf.keras.optimizers.SGD(lr=0.1), epochs=5)


def test6():
    """No hidden layer."""
    _run_experiment([], tf.keras.optimizers.SGD(lr=0.1), epochs=5)


def test7():
    """Try to overfit it with very wide hidden layers."""
    _run_experiment([2048, 4096, 10240, 4096],
                    tf.keras.optimizers.SGD(lr=0.1), epochs=5)


def test8():
    """Same as test7 but with L2 regularization on every hidden layer."""
    _run_experiment([2048, 4096, 10240, 4096],
                    tf.keras.optimizers.SGD(lr=0.1), epochs=5,
                    regularizer=tf.keras.regularizers.l2(0.01))


test1()
34.949438
81
0.618229
833
6,221
4.52461
0.120048
0.098435
0.110374
0.114619
0.885381
0.885381
0.877952
0.865216
0.865216
0.862563
0
0.039804
0.244816
6,221
177
82
35.146893
0.762452
0.05176
0
0.829932
0
0
0.052989
0.04212
0
0
0
0
0
1
0.054422
false
0
0.013605
0
0.068027
0.061224
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
169f25a6eaf477c587eec04cd7cb0c755def08e1
15,920
py
Python
alad/svhn_utilities.py
jzhao23/Adversarially-Learned-Anomaly-Detection
d01ce7fb15265d36de7550c92dbacbbb071fb9d2
[ "MIT" ]
null
null
null
alad/svhn_utilities.py
jzhao23/Adversarially-Learned-Anomaly-Detection
d01ce7fb15265d36de7550c92dbacbbb071fb9d2
[ "MIT" ]
null
null
null
alad/svhn_utilities.py
jzhao23/Adversarially-Learned-Anomaly-Detection
d01ce7fb15265d36de7550c92dbacbbb071fb9d2
[ "MIT" ]
null
null
null
""" CIFAR10 ALAD architecture. Generator (decoder), encoder and discriminator. """ import tensorflow.compat.v1 as tf tf.disable_v2_behavior() from utils import sn learning_rate = 0.0002 batch_size = 32 latent_dim = 100 init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.01) def leakyReLu(x, alpha=0.2, name=None): if name: with tf.variable_scope(name): return tf.nn.relu(x) - (alpha * tf.nn.relu(-x)) else: return tf.nn.relu(x) - (alpha * tf.nn.relu(-x)) def encoder(x_inp, is_training=False, getter=None, reuse=False, do_spectral_norm=True): """ Encoder architecture in tensorflow Maps the data into the latent space Args: x_inp (tensor): input data for the encoder. is_training (bool): for batch norms and dropouts getter: for exponential moving average during inference reuse (bool): sharing variables or not Returns: net (tensor): last activation layer of the encoder """ layers = sn if do_spectral_norm else tf.layers with tf.variable_scope('encoder', reuse=reuse, custom_getter=getter): x_inp = tf.reshape(x_inp, [-1, 32, 32, 3]) name_net = 'layer_1' with tf.variable_scope(name_net): net = layers.conv2d(x_inp, 128, kernel_size=4, padding='SAME', strides=2, kernel_initializer=init_kernel, name='conv') net = tf.layers.batch_normalization(net, training=is_training) net = leakyReLu(net, name='leaky_relu') name_net = 'layer_2' with tf.variable_scope(name_net): net = layers.conv2d(net, 256, kernel_size=4, padding='SAME', strides=2, kernel_initializer=init_kernel, name='conv') net = tf.layers.batch_normalization(net, training=is_training) net = leakyReLu(net, name='leaky_relu') name_net = 'layer_3' with tf.variable_scope(name_net): net = layers.conv2d(net, 512, kernel_size=4, padding='SAME', strides=2, kernel_initializer=init_kernel, name='conv') net = tf.layers.batch_normalization(net, training=is_training) net = leakyReLu(net, name='leaky_relu') name_net = 'layer_4' with tf.variable_scope(name_net): net = tf.layers.conv2d(net, latent_dim, kernel_size=4, strides=1, 
padding='VALID', kernel_initializer=init_kernel, name='conv') net = tf.squeeze(net, [1, 2]) return net def decoder(z_inp, is_training=False, getter=None, reuse=False): """ Generator architecture in tensorflow Generates data from the latent space Args: z_inp (tensor): input variable in the latent space is_training (bool): for batch norms and dropouts getter: for exponential moving average during inference reuse (bool): sharing variables or not Returns: net (tensor): last activation layer of the generator """ with tf.variable_scope('generator', reuse=reuse, custom_getter=getter): net = tf.reshape(z_inp, [-1, 1, 1, latent_dim]) name_net = 'layer_1' with tf.variable_scope(name_net): net = tf.layers.conv2d_transpose(net, filters=512, kernel_size=4, strides=2, padding='VALID', kernel_initializer=init_kernel, name='tconv1') net = tf.layers.batch_normalization(net, training=is_training, name='tconv1/batch_normalization') net = tf.nn.relu(net, name='tconv1/relu') name_net = 'layer_2' with tf.variable_scope(name_net): net = tf.layers.conv2d_transpose(net, filters=256, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='tconv2') net = tf.layers.batch_normalization(net, training=is_training, name='tconv2/batch_normalization') net = tf.nn.relu(net, name='tconv2/relu') name_net = 'layer_3' with tf.variable_scope(name_net): net = tf.layers.conv2d_transpose(net, filters=128, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='tconv3') net = tf.layers.batch_normalization(net, training=is_training, name='tconv3/batch_normalization') net = tf.nn.relu(net, name='tconv3/relu') name_net = 'layer_4' with tf.variable_scope(name_net): net = tf.layers.conv2d_transpose(net, filters=3, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='tconv4') net = tf.tanh(net, name='tconv4/tanh') return net def discriminator_xz(x_inp, z_inp, is_training=False, getter=None, reuse=False, do_spectral_norm=True): """ 
Discriminator architecture in tensorflow Discriminates between pairs (E(x), x) and (z, G(z)) Args: x_inp (tensor): input data for the discriminator. z_inp (tensor): input variable in the latent space is_training (bool): for batch norms and dropouts getter: for exponential moving average during inference reuse (bool): sharing variables or not Returns: logits (tensor): last activation layer of the discriminator (shape 1) intermediate_layer (tensor): intermediate layer for feature matching """ layers = sn if do_spectral_norm else tf.layers with tf.variable_scope('discriminator_xz', reuse=reuse, custom_getter=getter): name_net = 'x_layer_1' with tf.variable_scope(name_net): x = layers.conv2d(x_inp, filters=128, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='conv1') x = leakyReLu(x, 0.2, name='conv1/leaky_relu') name_net = 'x_layer_2' with tf.variable_scope(name_net): x = layers.conv2d(x, filters=256, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='conv2') x = tf.layers.batch_normalization(x, training=is_training, name='conv2/batch_normalization') x = leakyReLu(x, 0.2, name='conv2/leaky_relu') name_net = 'x_layer_3' with tf.variable_scope(name_net): x = layers.conv2d(x, filters=512, kernel_size=4, strides=2, padding='SAME', kernel_initializer=init_kernel, name='conv3') x = tf.layers.batch_normalization(x, training=is_training, name='conv3/batch_normalization') x = leakyReLu(x, 0.2, name='conv3/leaky_relu') x = tf.reshape(x, [-1,1,1,512*4*4]) z = tf.reshape(z_inp, [-1, 1, 1, latent_dim]) name_net = 'z_layer_1' with tf.variable_scope(name_net): z = layers.conv2d(z, filters=512, kernel_size=1, strides=1, padding='SAME', kernel_initializer=init_kernel, name='conv') z = leakyReLu(z) z = tf.layers.dropout(z, rate=0.2, training=is_training, name='dropout') name_net = 'z_layer_2' with tf.variable_scope(name_net): z = layers.conv2d(z, filters=512, kernel_size=1, strides=1, padding='SAME', 
kernel_initializer=init_kernel, name='conv') z = leakyReLu(z) z = tf.layers.dropout(z, rate=0.2, training=is_training, name='dropout') y = tf.concat([x, z], axis=-1) name_net = 'y_layer_1' with tf.variable_scope(name_net): y = layers.conv2d(y, filters=1024, kernel_size=1, strides=1, padding='SAME', kernel_initializer=init_kernel, name='conv') y = leakyReLu(y) y = tf.layers.dropout(y, rate=0.2, training=is_training, name='dropout') intermediate_layer = y name_net = 'y_layer_2' with tf.variable_scope(name_net): y = tf.layers.conv2d(y, filters=1, kernel_size=1, strides=1, padding='SAME', kernel_initializer=init_kernel, name='conv') logits = tf.squeeze(y) return logits, intermediate_layer def discriminator_xx(x, rec_x, is_training=False, getter=None, reuse=False, do_spectral_norm=True): """ Discriminator architecture in tensorflow Discriminates between (x,x) and (x,rec_x) Args: x (tensor): input from the data space rec_x (tensor): reconstructed data is_training (bool): for batch norms and dropouts getter: for exponential moving average during inference reuse (bool): sharing variables or not Returns: logits (tensor): last activation layer of the discriminator intermediate_layer (tensor): intermediate layer for feature matching """ layers = sn if do_spectral_norm else tf.layers with tf.variable_scope('discriminator_xx', reuse=reuse, custom_getter=getter): net = tf.concat([x, rec_x], axis=1) name_net = 'layer_1' with tf.variable_scope(name_net): net = layers.conv2d(net, filters=64, kernel_size=5, strides=2, padding='SAME', kernel_initializer=init_kernel, name='conv1') net = leakyReLu(net, 0.2, name='conv1/leaky_relu') net = tf.layers.dropout(net, rate=0.2, training=is_training, name='dropout') with tf.variable_scope(name_net, reuse=True): weights = tf.get_variable('conv1/kernel') name_net = 'layer_2' with tf.variable_scope(name_net): net = layers.conv2d(net, filters=128, kernel_size=5, strides=2, padding='SAME', kernel_initializer=init_kernel, name='conv2') net = 
leakyReLu(net, 0.2, name='conv2/leaky_relu') net = tf.layers.dropout(net, rate=0.2, training=is_training, name='dropout') net = tf.layers.flatten(net) intermediate_layer = net name_net = 'layer_3' with tf.variable_scope(name_net): net = tf.layers.dense(net, units=1, kernel_initializer=init_kernel, name='fc') logits = tf.squeeze(net) return logits, intermediate_layer def discriminator_zz(z, rec_z, is_training=False, getter=None, reuse=False, do_spectral_norm=True): """ Discriminator architecture in tensorflow Discriminates between (z,z) and (z,rec_z) Args: z (tensor): input from the latent space rec_z (tensor): reconstructed data is_training (bool): for batch norms and dropouts getter: for exponential moving average during inference reuse (bool): sharing variables or not Returns: logits (tensor): last activation layer of the discriminator intermediate_layer (tensor): intermediate layer for feature matching """ layers = sn if do_spectral_norm else tf.layers with tf.variable_scope('discriminator_zz', reuse=reuse, custom_getter=getter): y = tf.concat([z, rec_z], axis=-1) name_net = 'y_layer_1' with tf.variable_scope(name_net): y = layers.dense(y, units=64, kernel_initializer=init_kernel, name='fc') y = leakyReLu(y) y = tf.layers.dropout(y, rate=0.2, training=is_training, name='dropout') name_net = 'y_layer_2' with tf.variable_scope(name_net): y = layers.dense(y, units=32, kernel_initializer=init_kernel, name='fc') y = leakyReLu(y) y = tf.layers.dropout(y, rate=0.2, training=is_training, name='dropout') intermediate_layer = y name_net = 'y_layer_3' with tf.variable_scope(name_net): y = tf.layers.dense(y, units=1, kernel_initializer=init_kernel, name='fc') logits = tf.squeeze(y) return logits, intermediate_layer
37.370892
82
0.468467
1,564
15,920
4.588875
0.095269
0.04194
0.054619
0.074126
0.853421
0.838094
0.810227
0.794622
0.736937
0.725373
0
0.024794
0.450251
15,920
426
83
37.370892
0.795247
0.141834
0
0.72242
0
0
0.054665
0.009546
0
0
0
0
0
1
0.021352
false
0
0.007117
0
0.053381
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
16a6913c3581e2f794a3996d00e10562614169eb
158
py
Python
src/monitoring/notifications/__init__.py
gkovacs81/argus_server
97ebf705ed3e61a69bd561faf495e2c19bda510d
[ "MIT" ]
null
null
null
src/monitoring/notifications/__init__.py
gkovacs81/argus_server
97ebf705ed3e61a69bd561faf495e2c19bda510d
[ "MIT" ]
3
2021-06-02T04:07:35.000Z
2021-12-27T20:21:46.000Z
src/monitoring/notifications/__init__.py
gkovacs81/argus_server
97ebf705ed3e61a69bd561faf495e2c19bda510d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # @Author: Gábor Kovács # @Date: 2021-02-25 20:08:34 # @Last Modified by: Gábor Kovács # @Last Modified time: 2021-02-25 20:08:35
26.333333
42
0.632911
27
158
3.703704
0.666667
0.22
0.16
0.2
0.24
0
0
0
0
0
0
0.224806
0.183544
158
5
43
31.6
0.550388
0.93038
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
bc56c2f2d9b6ad38f3a61ca4dd5ca6f490c4d6fd
6,068
py
Python
Whop.py
xliquid808/WhoopSongInfoGrabber
f7a6fa5c929182094e1a5433b88c956d99ff870e
[ "Apache-2.0" ]
null
null
null
Whop.py
xliquid808/WhoopSongInfoGrabber
f7a6fa5c929182094e1a5433b88c956d99ff870e
[ "Apache-2.0" ]
null
null
null
Whop.py
xliquid808/WhoopSongInfoGrabber
f7a6fa5c929182094e1a5433b88c956d99ff870e
[ "Apache-2.0" ]
null
null
null
import glob
import os
import time

from mp3_tagger import MP3File

# Genius search endpoint; the query string is appended verbatim.
_GENIUS_SEARCH = "https://genius.com/search?q="

# First search result (mini song card) on the results page.
_FIRST_RESULT = "/html/body/routable-page/ng-outlet/search-results-page/div/div[2]/div[1]/div[2]/search-result-section/div/div[2]/search-result-items/div[1]/search-result-item/div/mini-song-card/a"

# XPaths on the modern ("application") song page layout.
_APP_TITLE = '//*[@id="application"]/main/div[1]/div[3]/div/h1'
_APP_ARTIST = '//*[@id="application"]/main/div[1]/div[3]/div/a'
_APP_FEAT_LABEL = '//*[@id="application"]/main/div[1]/div[4]/div/div[1]/div[1]/p'
_APP_FEAT_ARTIST = '//*[@id="application"]/main/div[1]/div[4]/div/div[1]/div[1]/a'

# XPaths on the legacy (routable-page) song page layout.
_OLD_TITLE = '/html/body/routable-page/ng-outlet/song-page/div/div/header-with-cover-art/div/div/div[1]/div[2]/div/h1'
_OLD_ARTIST = '/html/body/routable-page/ng-outlet/song-page/div/div/header-with-cover-art/div/div/div[1]/div[2]/div/h2/span/a'
_OLD_FEAT_LABEL = '/html/body/routable-page/ng-outlet/song-page/div/div/header-with-cover-art/div/div/div[1]/div[2]/div/ng-transclude/metadata/h3[1]/expandable-list/div/span[1]'
_OLD_FEAT_ARTIST = '/html/body/routable-page/ng-outlet/song-page/div/div/header-with-cover-art/div/div/div[1]/div[2]/div/ng-transclude/metadata/h3[1]/expandable-list/div/span[2]/span/a'


def _scrape_song_info(driver, title_xpath, artist_xpath, feat_label_xpath, feat_artist_xpath):
    """Read (song_name, artist) from the current song page.

    Appends " (feat. X)" to the title when the page shows a "Featuring"
    block; pages without one simply keep the plain title.
    """
    song_name = driver.find_element_by_xpath(title_xpath).text
    artist = driver.find_element_by_xpath(artist_xpath).text
    try:
        if driver.find_element_by_xpath(feat_label_xpath).text == "Featuring":
            featuring_artist = driver.find_element_by_xpath(feat_artist_xpath).text
            song_name = song_name + " (feat. " + featuring_artist + ")"
    except Exception:
        # No "Featuring" block on this page -- keep the plain title.
        pass
    return song_name, artist


def _tag_file(filename, song_name, artist):
    """Write the scraped song/artist into songs/<filename[0]>.mp3 and save."""
    print(song_name + " by " + artist)
    mp3 = MP3File("songs/" + filename[0] + ".mp3")
    mp3.song = song_name
    mp3.artist = artist
    mp3.save()
    print("Set tags!")


def search_for_song(filename):
    """Look up filename[0] on Genius and tag songs/<filename[0]>.mp3.

    `filename` is the [basename-without-extension, ...] list produced by the
    loop below.  BUG FIX: the original read the module-level global
    `file_query` for the search term even though it received `filename`; at
    the only call site the two were identical, so using the parameter keeps
    behaviour while removing the hidden global dependency.  Duplicated
    scrape-and-tag logic (4 copies) is factored into helpers, bare `except:`
    clauses narrowed to `except Exception`, and the unused
    `import mp3_tagger` removed.
    """
    from selenium import webdriver  # local import, as in the original
    driver = webdriver.Chrome()
    driver.get(_GENIUS_SEARCH + filename[0])
    time.sleep(5)  # let the results page render
    driver.find_element_by_xpath('//*[@id="onetrust-accept-btn-handler"]').click()

    correct = input("Is the displayed song correct? (Y/n) ")
    if correct == "Y":
        try:
            # Clicking the first result (or the new page layout) can fail,
            # e.g. when Genius lands straight on a legacy song page; fall
            # back to the legacy XPaths in that case.
            driver.find_element_by_xpath(_FIRST_RESULT).click()
            song_name, artist = _scrape_song_info(
                driver, _APP_TITLE, _APP_ARTIST, _APP_FEAT_LABEL, _APP_FEAT_ARTIST)
        except Exception:
            time.sleep(3)
            song_name, artist = _scrape_song_info(
                driver, _OLD_TITLE, _OLD_ARTIST, _OLD_FEAT_LABEL, _OLD_FEAT_ARTIST)
    else:
        song_query_manual = input("Please enter your query: ")
        driver.get(_GENIUS_SEARCH + song_query_manual)
        time.sleep(5)
        driver.find_element_by_xpath(_FIRST_RESULT).click()
        try:
            song_name, artist = _scrape_song_info(
                driver, _APP_TITLE, _APP_ARTIST, _APP_FEAT_LABEL, _APP_FEAT_ARTIST)
        except Exception:
            time.sleep(3)
            song_name, artist = _scrape_song_info(
                driver, _OLD_TITLE, _OLD_ARTIST, _OLD_FEAT_LABEL, _OLD_FEAT_ARTIST)
    _tag_file(filename, song_name, artist)


# Iterate every mp3 under songs/ (NOTE: backslash glob pattern is Windows-only).
for file_path in glob.iglob(r'songs\*.mp3'):
    file_query = os.path.basename(file_path)
    print(file_query)
    file_query = file_query.split(".mp3")
    print(file_query[0])
    search_for_song(file_query)
45.62406
247
0.582564
815
6,068
4.202454
0.123926
0.056058
0.044964
0.122044
0.872701
0.872701
0.872117
0.852847
0.839124
0.839124
0
0.025193
0.254285
6,068
132
248
45.969697
0.731713
0
0
0.778947
0
0.147368
0.368326
0.312788
0
0
0
0
0
1
0.010526
false
0
0.063158
0
0.073684
0.168421
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bc6d333b2480b8c74cc013e8fb1025b883c44024
8,056
py
Python
day22_pong/draw_number.py
frnkvsk/python100days
70d607ca58a526f0d66544ed65405b2425718108
[ "Unlicense" ]
null
null
null
day22_pong/draw_number.py
frnkvsk/python100days
70d607ca58a526f0d66544ed65405b2425718108
[ "Unlicense" ]
null
null
null
day22_pong/draw_number.py
frnkvsk/python100days
70d607ca58a526f0d66544ed65405b2425718108
[ "Unlicense" ]
null
null
null
from turtle import Turtle


class DrawScore:
    """Render a single digit (0-9) as a 5x3 grid of square turtles.

    Each grid cell is a 10px square Turtle; lit cells are coloured 'white',
    unlit cells 'black'.  Replaces the original 300-line if/elif chain with
    a digit -> pattern lookup table transcribed 1:1 from it.
    """

    # Segment colour per pattern code.
    _COLOURS = {'w': 'white', 'b': 'black'}

    # 5 rows x 3 columns per digit; 'w' = lit, 'b' = off.
    _DIGITS = {
        0: ('www', 'wbw', 'wbw', 'wbw', 'www'),
        1: ('bbw', 'bbw', 'bbw', 'bbw', 'bbw'),
        2: ('www', 'bbw', 'www', 'wbb', 'www'),
        3: ('www', 'bbw', 'www', 'bbw', 'www'),
        4: ('wbw', 'wbw', 'www', 'bbw', 'bbw'),
        5: ('www', 'wbb', 'www', 'bbw', 'www'),
        6: ('www', 'wbb', 'www', 'wbw', 'www'),
        7: ('www', 'bbw', 'bbw', 'bbw', 'bbw'),
        8: ('www', 'wbw', 'www', 'wbw', 'www'),
        9: ('www', 'wbw', 'www', 'bbw', 'www'),
    }

    def __init__(self, num, x, y):
        """Create the segment grid anchored at (x, y) and display num."""
        self.segments = [[] for _ in range(5)]
        self.create_segments(x, y)
        self.draw_number(num)

    def create_segments(self, x, y):
        """Create the 5x3 grid of 10px square turtles, top-left at (x, y)."""
        for i in range(0, 5):
            for j in range(0, 3):
                t = Turtle('square')
                t.shapesize(stretch_wid=.5, stretch_len=.5)
                t.goto(x + (j * 10), y - (i * 10))
                self.segments[i].append(t)

    def draw_number(self, num):
        """Colour the grid to show num.

        Values outside 0-9 leave the grid unchanged, matching the original
        if/elif chain (which had no final else).
        """
        pattern = self._DIGITS.get(num)
        if pattern is None:
            return
        for row, codes in zip(self.segments, pattern):
            for segment, code in zip(row, codes):
                segment.color(self._COLOURS[code])
36.618182
60
0.49007
1,004
8,056
3.921315
0.043825
0.463297
0.330709
0.519685
0.930404
0.923038
0.923038
0.923038
0.923038
0.923038
0
0.057859
0.311321
8,056
219
61
36.785388
0.651766
0
0
0.850575
0
0
0.096465
0
0
0
0
0
0
1
0.017241
false
0
0.005747
0
0.028736
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
bc7ab283bc3faf06f35ab56f7c809e433a119308
48,192
py
Python
Segmentation_UGSCNN/Projected_ResNet/MyDataLoader.py
Abdulah-Fawaz/Benchmarking-Surface-DL
9693379f26d57f9aabf28b973f40a9f6f627d26f
[ "MIT" ]
2
2021-12-04T07:04:56.000Z
2021-12-13T16:28:50.000Z
Segmentation_UGSCNN/Projected_ResNet/MyDataLoader.py
Abdulah-Fawaz/Benchmarking-Surface-DL
9693379f26d57f9aabf28b973f40a9f6f627d26f
[ "MIT" ]
1
2021-12-21T09:36:11.000Z
2022-01-25T10:26:43.000Z
Segmentation_UGSCNN/Projected_ResNet/MyDataLoader.py
Abdulah-Fawaz/Benchmarking-Surface-DL
9693379f26d57f9aabf28b973f40a9f6f627d26f
[ "MIT" ]
1
2022-02-27T17:38:19.000Z
2022-02-27T17:38:19.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Sep 15 17:29:27 2020 @author: fa19 """ import nibabel as nb import numpy as np import torch import random import torch.nn.functional as F means = np.load('../dHCP_mean_seg.npy') std = np.load('../dHCP_std_seg.npy') means = torch.from_numpy(means) stds = torch.from_numpy(std) ### SEGMENTATION FILES ### unwarped_files_directory = '/data/Data/benchmarking/fsaverage_32k_30_01_2021/ico6' unwarped_labels_directory = '/data/Data/dHCP/M-CRIB-S/template_space/ico6L' warped_files_directory = '/data/Data/benchmarking/fsaverage_32k_30_01_2021/ico6_warped' warped_labels_directory = '/data/Data/dHCP/M-CRIB-S/template_space/ico6L_warp' #unwarped_files_directory='/data/Data/derivatives_native_ico6_seg/features' #warped_files_directory='/data/Data/derivatives_native_ico6_seg/features_warp' #unwarped_labels_directory ='/data/Data/derivatives_native_ico6_seg/labels' #warped_labels_directory ='/data/Data/derivatives_native_ico6_seg/labels_warp' #means = torch.Tensor([0.0345]) #stds = torch.Tensor([0.1906]) test_rotation_arr = np.load('../GraphMethods/data/unseen_rots.npy').astype(int) rotation_arr = np.load('../GraphMethods/data/rotations_array.npy').astype(int) reversing_arr = np.load('../GraphMethods/data/reversing_arr.npy') #smoothing_arr = [[0, 10], [-12,34], [11,200], [-1,1]] # minima and maxima defines as mean +/- 4*std lower_bound = torch.Tensor([ -0.2819, -0.7279, -0.5199, -16.1347]) upper_bound = torch.Tensor([ 2.5354, 0.7970, 2.5550, 16.2459]) minima = torch.Tensor([ 0.0000, -0.7279, -0.3271, -14.8748]) maxima = torch.Tensor([ 2.5354, 0.7970, 2.5550, 12.1209]) xy_points = np.load('equirectangular_ico_6_points.npy') xy_points[:,0] = (xy_points[:,0] + 0.1)%1 grid = np.load('grid_170_square.npy') grid_x, grid_y = np.meshgrid(np.linspace(0.02, 0.98, 170), np.linspace(0.02, 0.98, 170)) grid[:,0] = grid_x.flatten() grid[:,1] = grid_y.flatten() from scipy.interpolate import griddata """ unwarped_files_directory: the 
directory of all the files in input_arr. BOTH L and R aarped_files_directory: the directory of all the warped files. warped directory could be the same as unwarped directory """ class My_dHCP_Data(torch.utils.data.Dataset): def __init__(self, input_arr, rotations = False, number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, sample_only = True, output_as_torch = True ): """ A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips. Fileanme style: in the array: only 'sub-X-ses-Y' but for the filenames themselves Left = 'sub-X_ses-Y_L' Right = 'sub-X_ses-Y_R' if warped: 'sub-X_ses-Y_L_W1' INPUT ARGS: 1. input_arr: Numpy array size Nx2 FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files LAST index must be the (float) label (OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age) 2 . rotations - boolean: to add rotations or not to add rotations 3. number of warps to include - INT NB WARPED AR INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPES PRESENT IN FILES 4. Particy Choice (JMPORTANT!) - defines left and right-ness If: 'left'- will output ONLY LEFT If: 'both' - will randomly choose L or R If 'combined' - will output a combined array (left first), will be eventually read as a file with twice the number of input channels. as they will be stacked together 5. smoothing - boolean, will clip extremal values according to the smoothing_array 6. normalisation - str. Will normalise according to 'range', 'std' or 'None' Range is from -1 to 1 Std is mean = 0, std = 1 7. 
output_as_torch - boolean: outputs values as torch Tensors if you want (usually yes) """ self.input_arr = input_arr self.image_files = input_arr[:,0] self.label = input_arr[:,-1] self.sample_only = sample_only self.rotations = rotations self.number_of_warps = number_of_warps self.parity = parity_choice self.smoothing = smoothing self.normalisation = normalisation self.output_as_torch = output_as_torch if self.number_of_warps != 0 and self.number_of_warps != None: self.directory = warped_files_directory else: self.directory = unwarped_files_directory def __len__(self): L = len(self.input_arr) if self.number_of_warps !=0: if self.sample_only == False: L = L*self.number_of_warps return L def __test_input_params__(self): assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels' assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)" assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both" if self.number_of_rotations != 0: assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero' assert self.rotations == bool, 'rotations must be boolean' assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range' def __genfilename__(self,idx): """ gets the appropriate file based on input parameters on PARITY and on WARPS """ # grab raw filename if self.number_of_warps != 0: warp_choice = str(1 + idx//len(self.input_arr)) idx = idx%len(self.input_arr) raw_filename = self.image_files[idx] # add parity to it. IN THE FORM OF A LIST! 
If requries both will output a list of length 2 filename = [] if self.parity == 'left': filename.append(raw_filename + '_L') elif self.parity == 'both': coin_flip = random.randint(0,1) if coin_flip == 0: filename.append(raw_filename + '_L') elif coin_flip == 1: filename.append(raw_filename + '_R') elif self.parity == 'combined': filename.append(raw_filename + '_L') filename.append(raw_filename+'_R') # filename is now a list of the correct filenames. # now add warps if required if self.number_of_warps != 0: filename = [s + '_W'+warp_choice for s in filename ] return filename def __getitem__(self, idx): """ First load the images and collect them as numpy arrays then collect the label then collect the metadata (though might be None) """ filename = self.__genfilename__(idx) image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename] image = [] if self.rotations == True: rotation_choice = random.randint(0, len(rotation_arr)-1) if rotation_choice !=0: for file in image_gifti: image.extend(item.data[rotation_arr[rotation_choice]] for item in file) else: for file in image_gifti: image.extend(item.data for item in file) else: for file in image_gifti: image.extend(item.data for item in file) ### labels if self.number_of_warps != 0: idx = idx%len(self.input_arr) label = self.label[idx] ###### metadata grabbing if necessary if self.input_arr.shape[1] > 2: self.metadata = input_arr[:,1:-1] else: self.metadata = None if self.smoothing != False: for i in range(len(image)): image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item()) # torchify if required: if self.normalisation != None: if self.normalisation == 'std': for i in range(len(image)): image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item() elif self.normalisation == 'range': for i in range(len(image)): image[i] = image[i] - minima[i%len(minima)].item() image[i] = image[i] / 
(maxima[i%len(maxima)].item()- minima[i%len(minima)].item()) if self.output_as_torch: image = torch.Tensor( image ) label = torch.Tensor( [label] ) if self.metadata != None: metadata = torch.Tensor( [self.metadata] ) if self.metadata != None: sample = {'image': image, 'metadata' : self.metadata, 'label': label} else: sample = {'image': image,'label': label} return sample """ examples: file_arr = np.load('/home/fa19/Documents/dHCP_Data_merged/scan_age_regression_full_shuffled_18-08-2020.npy', allow_pickle = True) My_dHCP_Data(file_arr, rotations=True, smoothing = True, parity_choice='both') My_dHCP_Data(file_arr, rotations=True, smoothing = True, parity_choice='combined') My_dHCP_Data(file_arr, number_of_warps = 5, rotations=True, smoothing = False, parity_choice='left') """ def get_global_mean_and_std_from_ds(ds): nb_samples = 0 num_channels = ds[0]['image'].size(0) channel_mean = torch.zeros(num_channels) channel_var = torch.zeros(num_channels) #channel_std = torch.Tensor([0., 0., 0.]) for samples in ds: # scale image to be between 0 and 1 images = samples['image'] channel_mean += images.mean(1) channel_var += images.var(1) nb_samples += 1 channel_mean /= nb_samples channel_var /= nb_samples channel_std = np.sqrt(channel_var) return channel_mean, channel_std def get_global_min_and_max_from_ds(ds): num_channels = ds[0]['image'].size(0) running_minima = torch.ones(num_channels)*100 running_maxima = torch.ones(num_channels)*-100 for samples in ds: # scale image to be between 0 and 1 images = samples['image'] image_minima = torch.min(images, dim=1)[0] image_maxima = torch.max(images, dim=1)[0] for i in range(len(image_minima)): if image_minima[i] < running_minima[i]: running_minima[i] = image_minima[i].item() if image_maxima[i] > running_maxima[i]: running_maxima[i] = image_maxima[i].item() return running_minima, running_maxima class My_Projected_dHCP_Data(torch.utils.data.Dataset): def __init__(self, input_arr, rotations = False, number_of_warps = 0, parity_choice = 
'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True ): """ A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips. Fileanme style: in the array: only 'sub-X-ses-Y' but for the filenames themselves Left = 'sub-X_ses-Y_L' Right = 'sub-X_ses-Y_R' if warped: 'sub-X_ses-Y_L_W1' INPUT ARGS: 1. input_arr: Numpy array size Nx2 FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files LAST index must be the (float) label (OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age) 2 . rotations - boolean: to add rotations or not to add rotations 3. number of warps to include - INT NB WARPED AR INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPES PRESENT IN FILES 4. Particy Choice (JMPORTANT!) - defines left and right-ness If: 'left'- will output ONLY LEFT If: 'both' - will randomly choose L or R If 'combined' - will output a combined array (left first), will be eventually read as a file with twice the number of input channels. as they will be stacked together 5. smoothing - boolean, will clip extremal values according to the smoothing_array 6. normalisation - str. Will normalise according to 'range', 'std' or 'None' Range is from -1 to 1 Std is mean = 0, std = 1 7. 
output_as_torch - boolean: outputs values as torch Tensors if you want (usually yes) """ self.input_arr = input_arr self.image_files = input_arr[:,0] self.label = input_arr[:,-1] self.rotations = rotations self.projected = projected self.number_of_warps = number_of_warps self.parity = parity_choice self.smoothing = smoothing self.normalisation = normalisation self.sample_only = sample_only self.output_as_torch = output_as_torch if self.number_of_warps != 0 and self.number_of_warps != None: self.directory = warped_files_directory else: self.directory = unwarped_files_directory def __len__(self): L = len(self.input_arr) if self.number_of_warps !=0: if self.sample_only == False: L = L*self.number_of_warps return L def __test_input_params__(self): assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels' assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)" assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both" if self.number_of_rotations != 0: assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero' assert self.rotations == bool, 'rotations must be boolean' assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range' def __genfilename__(self,idx): """ gets the appropriate file based on input parameters on PARITY and on WARPS """ # grab raw filename raw_filename = self.image_files[idx] # add parity to it. IN THE FORM OF A LIST! 
If requries both will output a list of length 2 filename = [] if self.parity == 'left': filename.append(raw_filename + '_L') elif self.parity == 'both': coin_flip = random.randint(0,1) if coin_flip == 0: filename.append(raw_filename + '_L') elif coin_flip == 1: filename.append(raw_filename + '_R') elif self.parity == 'combined': filename.append(raw_filename + '_L') filename.append(raw_filename+'_R') # filename is now a list of the correct filenames. # now add warps if required if self.number_of_warps != 0: warp_choice = str(random.randint(1,self.number_of_warps)) filename = [s + '_W'+warp_choice for s in filename ] return filename def __getitem__(self, idx): """ First load the images and collect them as numpy arrays then collect the label then collect the metadata (though might be None) """ filename = self.__genfilename__(idx) image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename] image = [] if self.rotations == True: rotation_choice = random.randint(0, len(rotation_arr)-1) if rotation_choice !=0: for file in image_gifti: image.extend(item.data[rotation_arr[rotation_choice]] for item in file) else: for file in image_gifti: image.extend(item.data for item in file) else: for file in image_gifti: image.extend(item.data for item in file) ### labels if self.number_of_warps != 0: idx = idx%len(self.input_arr) label = self.label[idx] ###### metadata grabbing if necessary if self.input_arr.shape[1] > 2: self.metadata = input_arr[:,1:-1] else: self.metadata = None if self.smoothing != False: for i in range(len(image)): image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item()) # torchify if required: if self.normalisation != None: if self.normalisation == 'std': for i in range(len(image)): image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item() elif self.normalisation == 'range': for i in range(len(image)): image[i] = image[i] - 
minima[i%len(minima)].item() image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item()) if self.output_as_torch: image = torch.Tensor( image ) label = torch.Tensor( [label] ) if self.metadata != None: metadata = torch.Tensor( [self.metadata] ) if self.projected == True: image = griddata(xy_points, image.T, grid, 'linear') image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1) if self.metadata != None: sample = {'image': image, 'metadata' : self.metadata, 'label': label} else: sample = {'image': image,'label': label} return sample class My_Projected_dHCP_Data_Segmentation(torch.utils.data.Dataset): def __init__(self, input_arr, rotations = False, number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True ): """ A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips. Fileanme style: in the array: only 'sub-X-ses-Y' but for the filenames themselves Left = 'sub-X_ses-Y_L' Right = 'sub-X_ses-Y_R' if warped: 'sub-X_ses-Y_L_W1' INPUT ARGS: 1. input_arr: Numpy array size Nx2 FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files LAST index must be the (float) label (OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age) 2 . rotations - boolean: to add rotations or not to add rotations 3. number of warps to include - INT NB WARPED AR INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPES PRESENT IN FILES 4. Particy Choice (JMPORTANT!) - defines left and right-ness If: 'left'- will output ONLY LEFT If: 'both' - will randomly choose L or R If 'combined' - will output a combined array (left first), will be eventually read as a file with twice the number of input channels. as they will be stacked together 5. 
smoothing - boolean, will clip extremal values according to the smoothing_array 6. normalisation - str. Will normalise according to 'range', 'std' or 'None' Range is from -1 to 1 Std is mean = 0, std = 1 7. output_as_torch - boolean: outputs values as torch Tensors if you want (usually yes) """ self.input_arr = input_arr self.image_files = input_arr[:,0] self.rotations = rotations self.projected = projected self.number_of_warps = number_of_warps self.parity = parity_choice self.smoothing = smoothing self.normalisation = normalisation self.sample_only = sample_only self.output_as_torch = output_as_torch if self.number_of_warps != 0 and self.number_of_warps != None: self.directory = warped_files_directory else: self.directory = unwarped_files_directory if self.number_of_warps != 0 and self.number_of_warps != None: self.label_directory = warped_labels_directory else: self.label_directory = unwarped_labels_directory def __len__(self): L = len(self.input_arr) if self.number_of_warps !=0: if self.sample_only == False: L = L*self.number_of_warps return L def __test_input_params__(self): assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels' assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)" assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both" if self.number_of_rotations != 0: assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero' assert self.rotations == bool, 'rotations must be boolean' assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range' def __genfilename__(self,idx, right): """ gets the appropriate file based on input parameters on PARITY and on WARPS """ # grab raw filename raw_filename = self.image_files[idx] # add parity to it. IN THE FORM OF A LIST! 
If requries both will output a list of length 2 filename = [] if self.parity != 'combined': if right == True: filename.append(raw_filename + '_R') else: filename.append(raw_filename + '_L') # if self.parity == 'left': # filename.append(raw_filename + '_L') # # elif self.parity == 'both': # coin_flip = random.randint(0,1) # if coin_flip == 0: # filename.append(raw_filename + '_L') # elif coin_flip == 1: # filename.append(raw_filename + '_R') # right = True if self.parity == 'combined': filename.append(raw_filename + '_L') filename.append(raw_filename+'_R') # filename is now a list of the correct filenames. # now add warps if required if self.number_of_warps != 0: warp_choice = str(random.randint(0,self.number_of_warps)) if warp_choice !='0': filename = [s + '_W'+warp_choice for s in filename ] return filename def __getitem__(self, idx): """ First load the images and collect them as numpy arrays then collect the label then collect the metadata (though might be None) """ if self.parity == 'both': T = self.__len__()//2 idx, right = idx % T, idx // T filename = self.__genfilename__(idx, right) else: right = False filename = self.__genfilename__(idx, right) image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename] label_gifti = [nb.load(self.label_directory + '/'+individual_filename+'.label.gii').darrays for individual_filename in filename] image = [] label = [] if self.rotations == True: rotation_choice = random.randint(0, len(rotation_arr)-1) if rotation_choice !=0: for file in image_gifti: image.extend(item.data[rotation_arr[rotation_choice]] for item in file) for file in label_gifti: label.extend(item.data[rotation_arr[rotation_choice]] for item in file) else: for file in image_gifti: image.extend(item.data for item in file) for file in label_gifti: label.extend(item.data for item in file) else: for file in image_gifti: image.extend(item.data for item in file) for file in label_gifti: 
label.extend(item.data for item in file) if right == True: image = [item[reversing_arr] for item in image] label = [item[reversing_arr] for item in label] ###### metadata grabbing if necessary if self.input_arr.shape[1] > 2: self.metadata = input_arr[:,1:-1] else: self.metadata = None if self.smoothing != False: for i in range(len(image)): image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item()) # torchify if required: if self.normalisation != None: if self.normalisation == 'std': for i in range(len(image)): image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item() elif self.normalisation == 'range': for i in range(len(image)): image[i] = image[i] - minima[i%len(minima)].item() image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item()) if self.output_as_torch: image = torch.Tensor( image ) label = torch.Tensor( label ) if self.metadata != None: metadata = torch.Tensor( [self.metadata] ) if self.projected == True: image = griddata(xy_points, image.T, grid, 'nearest') image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1) label = griddata(xy_points, label.T, grid, 'nearest') label = torch.Tensor(label.reshape(170,170,1))#.permute(2,0,1) label = F.one_hot(label.to(torch.int64), 37).contiguous() label = label.squeeze() label = label.permute(2,0,1) if self.metadata != None: sample = {'image': image, 'metadata' : self.metadata, 'label': label} else: sample = {'image': image,'label': label} return sample class My_Projected_dHCP_Data_Segmentation_Test(torch.utils.data.Dataset): def __init__(self, input_arr, rotations = False, number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True ): """ A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips. 
Fileanme style: in the array: only 'sub-X-ses-Y' but for the filenames themselves Left = 'sub-X_ses-Y_L' Right = 'sub-X_ses-Y_R' if warped: 'sub-X_ses-Y_L_W1' INPUT ARGS: 1. input_arr: Numpy array size Nx2 FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files LAST index must be the (float) label (OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age) 2 . rotations - boolean: to add rotations or not to add rotations 3. number of warps to include - INT NB WARPED AR INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPES PRESENT IN FILES 4. Particy Choice (JMPORTANT!) - defines left and right-ness If: 'left'- will output ONLY LEFT If: 'both' - will randomly choose L or R If 'combined' - will output a combined array (left first), will be eventually read as a file with twice the number of input channels. as they will be stacked together 5. smoothing - boolean, will clip extremal values according to the smoothing_array 6. normalisation - str. Will normalise according to 'range', 'std' or 'None' Range is from -1 to 1 Std is mean = 0, std = 1 7. 
output_as_torch - boolean: outputs values as torch Tensors if you want (usually yes) """ self.input_arr = input_arr self.image_files = input_arr[:,0] self.rotations = rotations self.projected = projected self.number_of_warps = number_of_warps self.parity = parity_choice self.smoothing = smoothing self.normalisation = normalisation self.sample_only = sample_only self.output_as_torch = output_as_torch if self.number_of_warps != 0 and self.number_of_warps != None: self.directory = warped_files_directory else: self.directory = unwarped_files_directory if self.number_of_warps != 0 and self.number_of_warps != None: self.label_directory = warped_labels_directory else: self.label_directory = unwarped_labels_directory def __len__(self): L = len(self.input_arr) if self.number_of_warps !=0: if self.sample_only == False: L = L*self.number_of_warps return L def __test_input_params__(self): assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels' assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)" assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both" if self.number_of_rotations != 0: assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero' assert self.rotations == bool, 'rotations must be boolean' assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range' def __genfilename__(self,idx, right): """ gets the appropriate file based on input parameters on PARITY and on WARPS """ # grab raw filename raw_filename = self.image_files[idx] # add parity to it. IN THE FORM OF A LIST! 
If requries both will output a list of length 2 filename = [] if self.parity != 'combined': if right == True: filename.append(raw_filename + '_R') else: filename.append(raw_filename + '_L') # if self.parity == 'left': # filename.append(raw_filename + '_L') # # elif self.parity == 'both': # coin_flip = random.randint(0,1) # if coin_flip == 0: # filename.append(raw_filename + '_L') # elif coin_flip == 1: # filename.append(raw_filename + '_R') # right = True if self.parity == 'combined': filename.append(raw_filename + '_L') filename.append(raw_filename+'_R') # filename is now a list of the correct filenames. # now add warps if required if self.number_of_warps != 0: warp_choice = str(random.randint(0,self.number_of_warps)) if warp_choice !='0': filename = [s + '_W'+warp_choice for s in filename ] return filename def __getitem__(self, idx): """ First load the images and collect them as numpy arrays then collect the label then collect the metadata (though might be None) """ if self.parity == 'both': T = self.__len__()//2 idx, right = idx % T, idx // T filename = self.__genfilename__(idx, right) else: right = False filename = self.__genfilename__(idx, right) image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename] label_gifti = [nb.load(self.label_directory + '/'+individual_filename+'.label.gii').darrays for individual_filename in filename] image = [] label = [] if self.rotations == True: rotation_choice = random.randint(1, len(test_rotation_arr)-1) if rotation_choice !=0: for file in image_gifti: image.extend(item.data[test_rotation_arr[rotation_choice]] for item in file) for file in label_gifti: label.extend(item.data[test_rotation_arr[rotation_choice]] for item in file) else: for file in image_gifti: image.extend(item.data for item in file) for file in label_gifti: label.extend(item.data for item in file) else: for file in image_gifti: image.extend(item.data for item in file) for file in label_gifti: 
label.extend(item.data for item in file) if right == True: image = [item[reversing_arr] for item in image] label = [item[reversing_arr] for item in label] ###### metadata grabbing if necessary if self.input_arr.shape[1] > 2: self.metadata = input_arr[:,1:-1] else: self.metadata = None if self.smoothing != False: for i in range(len(image)): image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item()) # torchify if required: if self.normalisation != None: if self.normalisation == 'std': for i in range(len(image)): image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item() elif self.normalisation == 'range': for i in range(len(image)): image[i] = image[i] - minima[i%len(minima)].item() image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item()) if self.output_as_torch: image = torch.Tensor( image ) label = torch.Tensor( label ) if self.metadata != None: metadata = torch.Tensor( [self.metadata] ) if self.projected == True: image = griddata(xy_points, image.T, grid, 'nearest') image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1) label = griddata(xy_points, label.T, grid, 'nearest') label = torch.Tensor(label.reshape(170,170,1))#.permute(2,0,1) label = F.one_hot(label.to(torch.int64), 37).contiguous() label = label.squeeze() label = label.permute(2,0,1) if self.metadata != None: sample = {'image': image, 'metadata' : self.metadata, 'label': label} else: sample = {'image': image,'label': label} return sample class My_Linear_Projected_dHCP_Data(torch.utils.data.Dataset): def __init__(self, input_arr, rotations = False, number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, output_as_torch = True ): """ A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips. 
Fileanme style: in the array: only 'sub-X-ses-Y' but for the filenames themselves Left = 'sub-X_ses-Y_L' Right = 'sub-X_ses-Y_R' if warped: 'sub-X_ses-Y_L_W1' INPUT ARGS: 1. input_arr: Numpy array size Nx2 FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files LAST index must be the (float) label (OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age) 2 . rotations - boolean: to add rotations or not to add rotations 3. number of warps to include - INT NB WARPED AR INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPES PRESENT IN FILES 4. Particy Choice (JMPORTANT!) - defines left and right-ness If: 'left'- will output ONLY LEFT If: 'both' - will randomly choose L or R If 'combined' - will output a combined array (left first), will be eventually read as a file with twice the number of input channels. as they will be stacked together 5. smoothing - boolean, will clip extremal values according to the smoothing_array 6. normalisation - str. Will normalise according to 'range', 'std' or 'None' Range is from -1 to 1 Std is mean = 0, std = 1 7. 
output_as_torch - boolean: outputs values as torch Tensors if you want (usually yes) """ self.input_arr = input_arr self.image_files = input_arr[:,0] self.label = input_arr[:,-1] self.rotations = rotations self.projected = projected self.number_of_warps = number_of_warps self.parity = parity_choice self.smoothing = smoothing self.normalisation = normalisation self.output_as_torch = output_as_torch if self.number_of_warps != 0 and self.number_of_warps != None: self.directory = warped_files_directory else: self.directory = unwarped_files_directory def __len__(self): L = len(self.input_arr) if self.number_of_warps !=0: L = L*self.number_of_warps return L def __test_input_params__(self): assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels' assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)" assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both" if self.number_of_rotations != 0: assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero' assert self.rotations == bool, 'rotations must be boolean' assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range' def __genfilename__(self,idx): """ gets the appropriate file based on input parameters on PARITY and on WARPS """ # grab raw filename raw_filename = self.image_files[idx] # add parity to it. IN THE FORM OF A LIST! If requries both will output a list of length 2 filename = [] if self.parity == 'left': filename.append(raw_filename + '_L') elif self.parity == 'both': coin_flip = random.randint(0,1) if coin_flip == 0: filename.append(raw_filename + '_L') elif coin_flip == 1: filename.append(raw_filename + '_R') elif self.parity == 'combined': filename.append(raw_filename + '_L') filename.append(raw_filename+'_R') # filename is now a list of the correct filenames. 
# now add warps if required if self.number_of_warps != 0: warp_choice = str(random.randint(1,self.number_of_warps)) filename = [s + '_W'+warp_choice for s in filename ] return filename def __getitem__(self, idx): """ First load the images and collect them as numpy arrays then collect the label then collect the metadata (though might be None) """ filename = self.__genfilename__(idx) image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename] image = [] if self.rotations == True: rotation_choice = random.randint(0, len(rotation_arr)-1) if rotation_choice !=0: for file in image_gifti: image.extend(item.data[rotation_arr[rotation_choice]] for item in file) else: for file in image_gifti: image.extend(item.data for item in file) else: for file in image_gifti: image.extend(item.data for item in file) ### labels if self.number_of_warps != 0: idx = idx%len(self.input_arr) label = self.label[idx] ###### metadata grabbing if necessary if self.input_arr.shape[1] > 2: self.metadata = input_arr[:,1:-1] else: self.metadata = None if self.smoothing != False: for i in range(len(image)): image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item()) # torchify if required: if self.normalisation != None: if self.normalisation == 'std': for i in range(len(image)): image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item() elif self.normalisation == 'range': for i in range(len(image)): image[i] = image[i] - minima[i%len(minima)].item() image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item()) if self.output_as_torch: image = torch.Tensor( image ) label = torch.Tensor( [label] ) if self.metadata != None: metadata = torch.Tensor( [self.metadata] ) if self.projected == True: image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1) if self.metadata != None: sample = {'image': image, 'metadata' : self.metadata, 'label': label} else: 
sample = {'image': image,'label': label} return sample
34.033898
182
0.530441
5,596
48,192
4.414939
0.061294
0.028495
0.038412
0.03234
0.940581
0.928641
0.923379
0.917146
0.915041
0.905043
0
0.016306
0.382823
48,192
1,415
183
34.057951
0.814343
0.248257
0
0.891228
0
0
0.077444
0.010651
0
0
0
0
0.052632
1
0.047368
false
0
0.010526
0
0.096491
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bce0ed0aa53c5a99a6da90b880ed1dc0f5e37446
54,303
py
Python
trankit/pipeline.py
wdhxek/trankit
8d1ce9f1a00a86a3d4c87d2e9bfd17daba098bfc
[ "Apache-2.0" ]
1
2021-04-07T04:35:47.000Z
2021-04-07T04:35:47.000Z
trankit/pipeline.py
wdhxek/trankit
8d1ce9f1a00a86a3d4c87d2e9bfd17daba098bfc
[ "Apache-2.0" ]
null
null
null
trankit/pipeline.py
wdhxek/trankit
8d1ce9f1a00a86a3d4c87d2e9bfd17daba098bfc
[ "Apache-2.0" ]
null
null
null
def is_string(input):
    """Return True if *input* is a string containing at least one non-whitespace character."""
    # isinstance instead of type(...) == str: also accepts str subclasses, otherwise identical.
    return isinstance(input, str) and len(input.strip()) > 0


def is_list_strings(input):
    """Return True if *input* is a non-empty list whose elements are all strings
    that are not purely whitespace (empty strings pass, as ''.isspace() is False)."""
    return (
        isinstance(input, list)
        and len(input) > 0
        and all(isinstance(element, str) and not element.isspace() for element in input)
    )


def is_list_list_strings(input):
    """Return True if *input* is a non-empty list whose FIRST element is a
    non-empty list of non-whitespace strings.

    NOTE: only ``input[0]`` is validated — sublists after the first are not
    checked. This matches the original behavior; callers rely on it.
    """
    return (
        isinstance(input, list)
        and len(input) > 0
        and isinstance(input[0], list)
        and len(input[0]) > 0
        and all(isinstance(element, str) and not element.isspace() for element in input[0])
    )
Currently supported languages: {}'.format(lang, list( lang2treebank.keys())) # download saved model for initial language download( cache_dir=self._config._cache_dir, language=lang, saved_model_version='v1.0.0', # manually set this to avoid duplicated storage embedding_name=master_config.embedding_name ) # load ALL vocabs self._load_vocabs() # shared multilingual embeddings print('Loading pretrained XLM-Roberta, this may take a while...') self._embedding_layers = Multilingual_Embedding(self._config) self._embedding_layers.to(self._config.device) if self._use_gpu: self._embedding_layers.half() self._embedding_layers.eval() # tokenizers self._tokenizer = {} for lang in self.added_langs: self._tokenizer[lang] = TokenizerClassifier(self._config, treebank_name=lang2treebank[lang]) self._tokenizer[lang].to(self._config.device) if self._use_gpu: self._tokenizer[lang].half() self._tokenizer[lang].eval() # taggers self._tagger = {} for lang in self.added_langs: self._tagger[lang] = PosDepClassifier(self._config, treebank_name=lang2treebank[lang]) self._tagger[lang].to(self._config.device) if self._use_gpu: self._tagger[lang].half() self._tagger[lang].eval() # - mwt and lemma: self._mwt_model = {} for lang in self.added_langs: treebank_name = lang2treebank[lang] if tbname2training_id[treebank_name] % 2 == 1: self._mwt_model[lang] = MWTWrapper(self._config, treebank_name=treebank_name, use_gpu=self._use_gpu) self._lemma_model = {} for lang in self.added_langs: treebank_name = lang2treebank[lang] self._lemma_model[lang] = LemmaWrapper(self._config, treebank_name=treebank_name, use_gpu=self._use_gpu) # ner if possible self._ner_model = {} for lang in self.added_langs: if lang in langwithner: self._ner_model[lang] = NERClassifier(self._config, lang) self._ner_model[lang].to(self._config.device) if self._use_gpu: self._ner_model[lang].half() self._ner_model[lang].eval() # load and hold the pretrained weights self._embedding_weights = self._embedding_layers.state_dict() if 
self.auto_mode: for l in code2lang.values(): if l not in self.added_langs: self.add(l) # constrain the language set for auto mode langid.set_languages([lang2code[l] for l in self.added_langs]) self.code2lang = code2lang print('=' * 50) print('Trankit is in auto mode!\nAvailable languages: {}'.format(self.added_langs)) print('=' * 50) else: self.set_active(lang) def _setup_config(self, lang): torch.cuda.empty_cache() # decide whether to run on GPU or CPU if self._gpu and torch.cuda.is_available(): self._use_gpu = True master_config.device = torch.device('cuda') self._tokbatchsize = 6 self._tagbatchsize = 24 else: self._use_gpu = False master_config.device = torch.device('cpu') self._tokbatchsize = 2 self._tagbatchsize = 12 if self._cache_dir is None: master_config._cache_dir = 'cache/trankit' else: master_config._cache_dir = self._cache_dir if not os.path.exists(master_config._cache_dir): os.makedirs(master_config._cache_dir, exist_ok=True) master_config.wordpiece_splitter = XLMRobertaTokenizer.from_pretrained(master_config.embedding_name, cache_dir=os.path.join( master_config._cache_dir, master_config.embedding_name)) self._config = master_config self._config.max_input_length = tbname2max_input_length.get(lang2treebank[lang], 400) # this is for tokenizer only def set_auto(self, state): assert type(state) == bool if state is True: print('Turning on auto mode for {} ...'.format(self.added_langs)) self.auto_mode = True cls_codes = [] self.code2lang = {} for l in self.added_langs: if l in extra_lang2code: cls_codes.append(extra_lang2code[l]) self.code2lang[extra_lang2code[l]] = l langid.set_languages(cls_codes) print('=' * 50) print('Trankit is in auto mode!') print('=' * 50) else: self.auto_mode = False lang = self.added_langs[0] self._config.active_lang = lang self.active_lang = lang self._config.treebank_name = lang2treebank[lang] self._config.max_input_length = tbname2max_input_length.get(lang2treebank[lang], 400) # this is for tokenizer only print('=' * 50) 
print('Trankit is in normal mode!') print('=' * 50) print('Active language: {}'.format(self._config.active_lang)) print('Available languages: {}'.format(self.added_langs)) print('=' * 50) def set_active(self, lang): assert not self.auto_mode, 'Cannot set a particular language as active in auto mode.\nPlease consider using Trankit in the normal mode to use this function.' assert is_string( lang) and lang in self.added_langs, 'Specified language must be added before being activated.\nCurrent added languages: {}'.format( self.added_langs) self._config.active_lang = lang self.active_lang = lang self._config.treebank_name = lang2treebank[lang] self._config.max_input_length = tbname2max_input_length.get(lang2treebank[lang], 400) # this is for tokenizer only print('=' * 50) print('Active language: {}'.format(self._config.active_lang)) print('=' * 50) def add(self, lang): assert is_string( lang) and lang in supported_langs, 'Specified language must be one of the supported languages: {}'.format( supported_langs) # download saved models download( cache_dir=self._config._cache_dir, language=lang, saved_model_version='v1.0.0', # manually set this to avoid duplicated storage embedding_name=master_config.embedding_name ) # update vocabs treebank_name = lang2treebank[lang] with open(os.path.join(self._config._cache_dir, master_config.embedding_name, '{}/{}.vocabs.json'.format(treebank2lang[treebank_name], treebank2lang[treebank_name]))) as f: vocabs = json.load(f) self._config.vocabs[treebank_name] = vocabs if lang in langwithner: with open(os.path.join(self._config._cache_dir, master_config.embedding_name, '{}/{}.ner-vocab.json'.format(lang, lang))) as f: self._config.ner_vocabs[lang] = json.load(f) self._config.itos[lang][UPOS] = {v: k for k, v in vocabs[UPOS].items()} self._config.itos[lang][XPOS] = {v: k for k, v in vocabs[XPOS].items()} self._config.itos[lang][FEATS] = {v: k for k, v in vocabs[FEATS].items()} self._config.itos[lang][DEPREL] = {v: k for k, v in 
vocabs[DEPREL].items()} # add tokenizer self._tokenizer[lang] = TokenizerClassifier(self._config, treebank_name=lang2treebank[lang]) self._tokenizer[lang].to(self._config.device) if self._use_gpu: self._tokenizer[lang].half() self._tokenizer[lang].eval() # add tagger self._tagger[lang] = PosDepClassifier(self._config, treebank_name=lang2treebank[lang]) self._tagger[lang].to(self._config.device) if self._use_gpu: self._tagger[lang].half() self._tagger[lang].eval() # mwt if needed treebank_name = lang2treebank[lang] if tbname2training_id[treebank_name] % 2 == 1: self._mwt_model[lang] = MWTWrapper(self._config, treebank_name=treebank_name, use_gpu=self._use_gpu) # lemma self._lemma_model[lang] = LemmaWrapper(self._config, treebank_name=treebank_name, use_gpu=self._use_gpu) # ner if possible if lang in langwithner: self._ner_model[lang] = NERClassifier(self._config, lang) self._ner_model[lang].to(self._config.device) if self._use_gpu: self._ner_model[lang].half() self._ner_model[lang].eval() self.added_langs.append(lang) def _load_vocabs(self): self._config.vocabs = {} self._config.ner_vocabs = {} self._config.itos = defaultdict(dict) for lang in self.added_langs: treebank_name = lang2treebank[lang] with open(os.path.join(self._config._cache_dir, master_config.embedding_name, '{}/{}.vocabs.json'.format(lang, lang))) as f: vocabs = json.load(f) self._config.vocabs[treebank_name] = vocabs self._config.itos[lang][UPOS] = {v: k for k, v in vocabs[UPOS].items()} self._config.itos[lang][XPOS] = {v: k for k, v in vocabs[XPOS].items()} self._config.itos[lang][FEATS] = {v: k for k, v in vocabs[FEATS].items()} self._config.itos[lang][DEPREL] = {v: k for k, v in vocabs[DEPREL].items()} # ner vocabs if lang in langwithner: with open(os.path.join(self._config._cache_dir, master_config.embedding_name, '{}/{}.ner-vocab.json'.format(lang, lang))) as f: self._config.ner_vocabs[lang] = json.load(f) def _load_adapter_weights(self, model_name): assert model_name in ['tokenizer', 'tagger', 
'ner'] if model_name == 'tokenizer': pretrained_weights = self._tokenizer[self._config.active_lang].pretrained_tokenizer_weights elif model_name == 'tagger': pretrained_weights = self._tagger[self._config.active_lang].pretrained_tagger_weights else: assert model_name == 'ner' pretrained_weights = self._ner_model[self._config.active_lang].pretrained_ner_weights for name, value in pretrained_weights.items(): if 'adapters.{}.adapter'.format(model_name) in name: target_name = name.replace('adapters.{}.adapter'.format(model_name), 'adapters.embedding.adapter') self._embedding_weights[target_name] = value self._embedding_layers.load_state_dict(self._embedding_weights) def _detect_lang_and_switch(self, text): detected_code = langid.classify(text)[0] assert detected_code in self.code2lang, 'Detected code "{}" must be in {}'.format(detected_code, self.code2lang.keys()) lang = self.code2lang[detected_code] assert is_string( lang) and lang in self.added_langs, 'Specified language must be added before being activated.\nCurrent added languages: {}'.format( self.added_langs) self._config.active_lang = lang self.active_lang = lang self._config.treebank_name = lang2treebank[lang] self._config.max_input_length = tbname2max_input_length.get(lang2treebank[lang], 400) # this is for tokenizer only # print('=' * 50) # print('Switching to {}'.format(lang)) # print('=' * 50) def ssplit(self, in_doc): # assuming input is a document assert is_string(in_doc), 'Input must be a non-empty string.' 
# switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text=in_doc) eval_batch_size = tbname2tokbatchsize.get(lang2treebank[self.active_lang], self._tokbatchsize) # load input text config = self._config test_set = TokenizeDatasetLive(config, in_doc, max_input_length=tbname2max_input_length.get( lang2treebank[self.active_lang], 400)) test_set.numberize(config.wordpiece_splitter) # load weights of tokenizer into the combined model self._load_adapter_weights(model_name='tokenizer') # make predictions wordpiece_pred_labels, wordpiece_ends, paragraph_indexes = [], [], [] for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn): wordpiece_reprs = self._embedding_layers.get_tokenizer_inputs(batch) predictions = self._tokenizer[self._config.active_lang].predict(batch, wordpiece_reprs) wp_pred_labels, wp_ends, para_ids = predictions[0], predictions[1], predictions[2] wp_pred_labels = wp_pred_labels.data.cpu().numpy().tolist() for i in range(len(wp_pred_labels)): wordpiece_pred_labels.append(wp_pred_labels[i][: len(wp_ends[i])]) wordpiece_ends.extend(wp_ends) paragraph_indexes.extend(para_ids) torch.cuda.empty_cache() # mapping para_id_to_wp_pred_labels = defaultdict(list) for wp_pred_ls, wp_es, p_index in zip(wordpiece_pred_labels, wordpiece_ends, paragraph_indexes): para_id_to_wp_pred_labels[p_index].extend([(pred, char_position) for pred, char_position in zip(wp_pred_ls, wp_es)]) # get predictions corpus_text = in_doc paragraphs = [pt.rstrip() for pt in NEWLINE_WHITESPACE_RE.split(corpus_text) if len(pt.rstrip()) > 0] all_wp_preds = [] all_para_texts = [] all_para_starts = [] ############## cloned_raw_text = deepcopy(in_doc) global_offset = 0 for para_index, para_text in enumerate(paragraphs): cloned_raw_text, start_char_idx = get_start_char_idx(para_text, cloned_raw_text) start_char_idx += global_offset global_offset = start_char_idx + len(para_text) 
all_para_starts.append(start_char_idx) para_wp_preds = [0 for _ in para_text] for wp_l, end_position in para_id_to_wp_pred_labels[para_index]: para_wp_preds[end_position] = wp_l all_wp_preds.append(para_wp_preds) all_para_texts.append(para_text) ########################### sentences = [] for j in range(len(paragraphs)): para_text = all_para_texts[j] wp_pred = all_wp_preds[j] para_start = all_para_starts[j] current_tok = '' current_sent = [] local_position = 0 for t, wp_p in zip(para_text, wp_pred): local_position += 1 current_tok += t if wp_p >= 1: tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) <= 0: current_tok = '' continue additional_info = {DSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, wp_p, additional_info)] current_tok = '' if (wp_p == 2 or wp_p == 4): sent_span = (current_sent[0][2][DSPAN][0], current_sent[-1][2][DSPAN][1]) sentences.append( {ID: len(sentences) + 1, TEXT: in_doc[sent_span[0]: sent_span[1]], DSPAN: (sent_span[0], sent_span[1])}) current_sent = [] if len(current_tok): tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) > 0: additional_info = {DSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, 2, additional_info)] if len(current_sent): sent_span = (current_sent[0][2][DSPAN][0], current_sent[-1][2][DSPAN][1]) sentences.append( {ID: len(sentences) + 1, TEXT: in_doc[sent_span[0]: sent_span[1]], DSPAN: (sent_span[0], sent_span[1])}) torch.cuda.empty_cache() return {TEXT: in_doc, SENTENCES: sentences, LANG: self.active_lang} def tokenize(self, input, is_sent=False): assert is_string(input), 'Input must be a non-empty string.' 
# switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text=input) if type(input) == str and input.isspace(): return [] ori_text = deepcopy(input) if is_sent: return {TEXT: ori_text, TOKENS: self._tokenize_sent(in_sent=input), LANG: self.active_lang} else: return {TEXT: ori_text, SENTENCES: self._tokenize_doc(in_doc=input), LANG: self.active_lang} def _tokenize_sent(self, in_sent): # assuming input is a sentence eval_batch_size = tbname2tokbatchsize.get(lang2treebank[self.active_lang], self._tokbatchsize) if self._config.embedding_name == 'xlm-roberta-large': eval_batch_size = int(eval_batch_size / 2) # load input text config = self._config test_set = TokenizeDatasetLive(config, in_sent, max_input_length=tbname2max_input_length.get( lang2treebank[self.active_lang], 400)) test_set.numberize(config.wordpiece_splitter) # load weights of tokenizer into the combined model self._load_adapter_weights(model_name='tokenizer') # make predictions wordpiece_pred_labels, wordpiece_ends, paragraph_indexes = [], [], [] for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn): wordpiece_reprs = self._embedding_layers.get_tokenizer_inputs(batch) predictions = self._tokenizer[self._config.active_lang].predict(batch, wordpiece_reprs) wp_pred_labels, wp_ends, para_ids = predictions[0], predictions[1], predictions[2] wp_pred_labels = wp_pred_labels.data.cpu().numpy().tolist() for i in range(len(wp_pred_labels)): wordpiece_pred_labels.append(wp_pred_labels[i][: len(wp_ends[i])]) wordpiece_ends.extend(wp_ends) paragraph_indexes.extend(para_ids) # mapping para_id_to_wp_pred_labels = defaultdict(list) for wp_pred_ls, wp_es, p_index in zip(wordpiece_pred_labels, wordpiece_ends, paragraph_indexes): para_id_to_wp_pred_labels[p_index].extend([(pred, char_position) for pred, char_position in zip(wp_pred_ls, wp_es)]) # get predictions corpus_text = in_sent paragraphs = [pt.rstrip() for pt in 
NEWLINE_WHITESPACE_RE.split(corpus_text) if len(pt.rstrip()) > 0] all_wp_preds = [] all_para_texts = [] all_para_starts = [] ############## cloned_raw_text = deepcopy(in_sent) global_offset = 0 for para_index, para_text in enumerate(paragraphs): cloned_raw_text, start_char_idx = get_start_char_idx(para_text, cloned_raw_text) start_char_idx += global_offset global_offset = start_char_idx + len(para_text) all_para_starts.append(start_char_idx) para_wp_preds = [0 for _ in para_text] for wp_l, end_position in para_id_to_wp_pred_labels[para_index]: para_wp_preds[end_position] = wp_l all_wp_preds.append(para_wp_preds) all_para_texts.append(para_text) ########################### tokens = [] for j in range(len(paragraphs)): para_text = all_para_texts[j] wp_pred = all_wp_preds[j] para_start = all_para_starts[j] current_tok = '' current_sent = [] local_position = 0 for t, wp_p in zip(para_text, wp_pred): local_position += 1 current_tok += t if wp_p >= 1: tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) <= 0: current_tok = '' continue additional_info = {'current_len': len(tokens), SSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, wp_p, additional_info)] current_tok = '' if (wp_p == 2 or wp_p == 4): tokens += get_output_sentence(current_sent) current_sent = [] if len(current_tok): tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) > 0: additional_info = {'current_len': len(tokens), SSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, 2, additional_info)] if len(current_sent): tokens += get_output_sentence(current_sent) # multi-word expansion if required if tbname2training_id[self._config.treebank_name] % 2 == 1: tokens = self._mwt_expand([{TOKENS: tokens}])[0][TOKENS] torch.cuda.empty_cache() return tokens def _tokenize_doc(self, in_doc): 
# assuming input is a document eval_batch_size = tbname2tokbatchsize.get(lang2treebank[self.active_lang], self._tokbatchsize) if self._config.embedding_name == 'xlm-roberta-large': eval_batch_size = int(eval_batch_size / 2) # load input text config = self._config test_set = TokenizeDatasetLive(config, in_doc, max_input_length=tbname2max_input_length.get( lang2treebank[self.active_lang], 400)) test_set.numberize(config.wordpiece_splitter) # load weights of tokenizer into the combined model self._load_adapter_weights(model_name='tokenizer') # make predictions wordpiece_pred_labels, wordpiece_ends, paragraph_indexes = [], [], [] for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn): wordpiece_reprs = self._embedding_layers.get_tokenizer_inputs(batch) predictions = self._tokenizer[self._config.active_lang].predict(batch, wordpiece_reprs) wp_pred_labels, wp_ends, para_ids = predictions[0], predictions[1], predictions[2] wp_pred_labels = wp_pred_labels.data.cpu().numpy().tolist() for i in range(len(wp_pred_labels)): wordpiece_pred_labels.append(wp_pred_labels[i][: len(wp_ends[i])]) wordpiece_ends.extend(wp_ends) paragraph_indexes.extend(para_ids) # mapping para_id_to_wp_pred_labels = defaultdict(list) for wp_pred_ls, wp_es, p_index in zip(wordpiece_pred_labels, wordpiece_ends, paragraph_indexes): para_id_to_wp_pred_labels[p_index].extend([(pred, char_position) for pred, char_position in zip(wp_pred_ls, wp_es)]) # get predictions corpus_text = in_doc paragraphs = [pt.rstrip() for pt in NEWLINE_WHITESPACE_RE.split(corpus_text) if len(pt.rstrip()) > 0] all_wp_preds = [] all_para_texts = [] all_para_starts = [] ############## cloned_raw_text = deepcopy(in_doc) global_offset = 0 for para_index, para_text in enumerate(paragraphs): cloned_raw_text, start_char_idx = get_start_char_idx(para_text, cloned_raw_text) start_char_idx += global_offset global_offset = start_char_idx + len(para_text) 
all_para_starts.append(start_char_idx) para_wp_preds = [0 for _ in para_text] for wp_l, end_position in para_id_to_wp_pred_labels[para_index]: para_wp_preds[end_position] = wp_l all_wp_preds.append(para_wp_preds) all_para_texts.append(para_text) ########################### doc = [] for j in range(len(paragraphs)): para_text = all_para_texts[j] wp_pred = all_wp_preds[j] para_start = all_para_starts[j] current_tok = '' current_sent = [] local_position = 0 for t, wp_p in zip(para_text, wp_pred): local_position += 1 current_tok += t if wp_p >= 1: tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) <= 0: current_tok = '' continue additional_info = {DSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, wp_p, additional_info)] current_tok = '' if (wp_p == 2 or wp_p == 4): processed_sent = get_output_sentence(current_sent) doc.append({ ID: len(doc) + 1, TEXT: in_doc[processed_sent[0][DSPAN][0]: processed_sent[-1][DSPAN][ 1]], TOKENS: processed_sent, DSPAN: (processed_sent[0][DSPAN][0], processed_sent[-1][DSPAN][1]) }) current_sent = [] if len(current_tok): tok = normalize_token(test_set.treebank_name, current_tok, ud_eval=self._ud_eval) assert '\t' not in tok, tok if len(tok) > 0: additional_info = {DSPAN: (para_start + local_position - len(tok), para_start + local_position)} current_sent += [(tok, 2, additional_info)] if len(current_sent): processed_sent = get_output_sentence(current_sent) doc.append({ ID: len(doc) + 1, TEXT: in_doc[ processed_sent[0][DSPAN][0]: processed_sent[-1][DSPAN][1]], TOKENS: processed_sent, DSPAN: (processed_sent[0][DSPAN][0], processed_sent[-1][DSPAN][1]) }) # multi-word expansion if required if tbname2training_id[self._config.treebank_name] % 2 == 1: doc = self._mwt_expand(doc) torch.cuda.empty_cache() return doc def posdep(self, input, is_sent=False): if is_sent: assert is_string(input) or is_list_strings( input), 'Input must be 
one of the following:\n(i) A non-empty string.\n(ii) A list of non-empty strings.' if is_list_strings(input): # switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text=' '.join(input)) input = [{ID: k + 1, TEXT: w} for k, w in enumerate(input)] return {TOKENS: self._posdep_sent(in_sent=input), LANG: self.active_lang} else: # switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text=input) ori_text = deepcopy(input) return {TEXT: ori_text, TOKENS: self._posdep_sent(in_sent=input), LANG: self.active_lang} else: assert is_string(input) or is_list_list_strings( input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of lists of non-empty strings.' if is_list_list_strings(input): # switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text='\n'.join([' '.join(sent) for sent in input])) input = [{ID: sid + 1, TOKENS: [{ID: tid + 1, TEXT: w} for tid, w in enumerate(sent)]} for sid, sent in enumerate(input)] return {SENTENCES: self._posdep_doc(in_doc=input), LANG: self.active_lang} else: # switch to detected lang if auto mode is on if self.auto_mode: self._detect_lang_and_switch(text=input) ori_text = deepcopy(input) return {TEXT: ori_text, SENTENCES: self._posdep_doc(in_doc=input), LANG: self.active_lang} def _posdep_sent(self, in_sent): # assuming input is a sentence if type(in_sent) == str: # input sentence is an untokenized string in this case in_sent = self._tokenize_sent(in_sent) posdep_sent = deepcopy(in_sent) posdep_sent = [{ID: 1, TOKENS: posdep_sent}] # load outputs of tokenizer config = self._config test_set = TaggerDatasetLive( tokenized_doc=posdep_sent, wordpiece_splitter=config.wordpiece_splitter, config=config ) test_set.numberize() # load weights of tagger into the combined model self._load_adapter_weights(model_name='tagger') # make predictions eval_batch_size = tbname2tagbatchsize.get(self._config.treebank_name, 
self._tagbatchsize) if self._config.embedding_name == 'xlm-roberta-large': eval_batch_size = int(eval_batch_size / 3) for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn): batch_size = len(batch.word_num) word_reprs, cls_reprs = self._embedding_layers.get_tagger_inputs(batch) predictions = self._tagger[self._config.active_lang].predict(batch, word_reprs, cls_reprs) predicted_upos = predictions[0] predicted_xpos = predictions[1] predicted_feats = predictions[2] predicted_upos = predicted_upos.data.cpu().numpy().tolist() predicted_xpos = predicted_xpos.data.cpu().numpy().tolist() predicted_feats = predicted_feats.data.cpu().numpy().tolist() # head, deprel predicted_dep = predictions[3] sentlens = [l + 1 for l in batch.word_num] head_seqs = [chuliu_edmonds_one_root(adj[:l, :l])[1:] for adj, l in zip(predicted_dep[0], sentlens)] # remove attachment for the root deprel_seqs = [ [self._config.itos[self._config.active_lang][DEPREL][predicted_dep[1][i][j + 1][h]] for j, h in enumerate(hs)] for i, hs in enumerate(head_seqs)] pred_tokens = [[[head_seqs[i][j], deprel_seqs[i][j]] for j in range(sentlens[i] - 1)] for i in range(batch_size)] for bid in range(batch_size): sentid = batch.sent_index[bid] for i in range(batch.word_num[bid]): wordid = batch.word_ids[bid][i] # upos pred_upos_id = predicted_upos[bid][i] upos_name = self._config.itos[self._config.active_lang][UPOS][pred_upos_id] test_set.conllu_doc[sentid][wordid][UPOS] = upos_name # xpos pred_xpos_id = predicted_xpos[bid][i] xpos_name = self._config.itos[self._config.active_lang][XPOS][pred_xpos_id] test_set.conllu_doc[sentid][wordid][XPOS] = xpos_name # feats pred_feats_id = predicted_feats[bid][i] feats_name = self._config.itos[self._config.active_lang][FEATS][pred_feats_id] test_set.conllu_doc[sentid][wordid][FEATS] = feats_name # head test_set.conllu_doc[sentid][wordid][HEAD] = int(pred_tokens[bid][i][0]) # deprel test_set.conllu_doc[sentid][wordid][DEPREL] = 
pred_tokens[bid][i][1] tagged_doc = get_output_doc(posdep_sent, test_set.conllu_doc) torch.cuda.empty_cache() return tagged_doc[0][TOKENS] def _posdep_doc(self, in_doc): # assuming input is a document if type(in_doc) == str: # in_doc is an untokenized string in this case in_doc = self._tokenize_doc(in_doc) dposdep_doc = deepcopy(in_doc) # load outputs of tokenizer config = self._config test_set = TaggerDatasetLive( tokenized_doc=dposdep_doc, wordpiece_splitter=config.wordpiece_splitter, config=config ) test_set.numberize() # load weights of tagger into the combined model self._load_adapter_weights(model_name='tagger') # make predictions eval_batch_size = tbname2tagbatchsize.get(self._config.treebank_name, self._tagbatchsize) if self._config.embedding_name == 'xlm-roberta-large': eval_batch_size = int(eval_batch_size / 3) for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn): batch_size = len(batch.word_num) word_reprs, cls_reprs = self._embedding_layers.get_tagger_inputs(batch) predictions = self._tagger[self._config.active_lang].predict(batch, word_reprs, cls_reprs) predicted_upos = predictions[0] predicted_xpos = predictions[1] predicted_feats = predictions[2] predicted_upos = predicted_upos.data.cpu().numpy().tolist() predicted_xpos = predicted_xpos.data.cpu().numpy().tolist() predicted_feats = predicted_feats.data.cpu().numpy().tolist() # head, deprel predicted_dep = predictions[3] sentlens = [l + 1 for l in batch.word_num] head_seqs = [chuliu_edmonds_one_root(adj[:l, :l])[1:] for adj, l in zip(predicted_dep[0], sentlens)] # remove attachment for the root deprel_seqs = [ [self._config.itos[self._config.active_lang][DEPREL][predicted_dep[1][i][j + 1][h]] for j, h in enumerate(hs)] for i, hs in enumerate(head_seqs)] pred_tokens = [[[head_seqs[i][j], deprel_seqs[i][j]] for j in range(sentlens[i] - 1)] for i in range(batch_size)] for bid in range(batch_size): sentid = batch.sent_index[bid] for i in 
# NOTE(review): the fragment below is the tail of a POS/dep-tagging method whose
# definition starts before this excerpt; even the leading "for i in" of the inner
# loop was cut off upstream. Reproduced verbatim (re-indented only) — do not rely
# on it in isolation; confirm against the full file.
    range(batch.word_num[bid]):
        wordid = batch.word_ids[bid][i]
        # upos
        pred_upos_id = predicted_upos[bid][i]
        upos_name = self._config.itos[self._config.active_lang][UPOS][pred_upos_id]
        test_set.conllu_doc[sentid][wordid][UPOS] = upos_name
        # xpos
        pred_xpos_id = predicted_xpos[bid][i]
        xpos_name = self._config.itos[self._config.active_lang][XPOS][pred_xpos_id]
        test_set.conllu_doc[sentid][wordid][XPOS] = xpos_name
        # feats
        pred_feats_id = predicted_feats[bid][i]
        feats_name = self._config.itos[self._config.active_lang][FEATS][pred_feats_id]
        test_set.conllu_doc[sentid][wordid][FEATS] = feats_name
        # head
        test_set.conllu_doc[sentid][wordid][HEAD] = int(pred_tokens[bid][i][0])
        # deprel
        test_set.conllu_doc[sentid][wordid][DEPREL] = pred_tokens[bid][i][1]
    tagged_doc = get_output_doc(dposdep_doc, test_set.conllu_doc)
    torch.cuda.empty_cache()
    return tagged_doc

    def lemmatize(self, input, is_sent=False):
        """Lemmatize *input* and return a dict of lemmatized tokens plus LANG.

        input: a raw string, a list of pretokenized words (with is_sent=True),
            or a list of lists of words (document mode).
        is_sent: treat *input* as a single sentence instead of a document.

        Returns {TOKENS: ..., LANG: ...} for sentence mode or
        {SENTENCES: ..., LANG: ...} for document mode; raw-string inputs also
        get the original text under TEXT. Raises AssertionError on bad input.
        """
        if is_sent:
            assert is_string(input) or is_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of non-empty strings.'
            if is_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=' '.join(input))
                # wrap pretokenized words into {ID, TEXT} dicts (IDs are 1-based)
                input = [{ID: k + 1, TEXT: w} for k, w in enumerate(input)]
                return {TOKENS: self._lemmatize_sent(in_sent=input, obmit_tag=True), LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                # keep an untouched copy of the raw text for the output dict
                ori_text = deepcopy(input)
                return {TEXT: ori_text, TOKENS: self._lemmatize_sent(in_sent=input, obmit_tag=True), LANG: self.active_lang}
        else:
            assert is_string(input) or is_list_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of lists of non-empty strings.'
            if is_list_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text='\n'.join([' '.join(sent) for sent in input]))
                # wrap pretokenized sentences into {ID, TOKENS: [{ID, TEXT}]} dicts
                input = [{ID: sid + 1, TOKENS: [{ID: tid + 1, TEXT: w} for tid, w in enumerate(sent)]} for sid, sent in
                         enumerate(input)]
                return {SENTENCES: self._lemmatize_doc(in_doc=input, obmit_tag=True), LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                ori_text = deepcopy(input)
                return {TEXT: ori_text, SENTENCES: self._lemmatize_doc(in_doc=input, obmit_tag=True), LANG: self.active_lang}

    def _lemmatize_sent(self, in_sent, obmit_tag=False):
        """Tokenize (if needed), posdep-tag, then lemmatize a single sentence.

        obmit_tag is forwarded unchanged to the lemma model's predict().
        Returns the lemmatized token list of the (single) sentence.
        """
        if type(in_sent) == str:
            in_sent = self._tokenize_sent(in_sent)
        # tagging always runs, even on pretokenized input
        in_sent = self._posdep_sent(in_sent)
        # deepcopy so the lemma model cannot mutate the caller's structure
        dlemmatize_sent = deepcopy(in_sent)
        lemmatized_sent = \
            self._lemma_model[self._config.active_lang].predict([{ID: 1, TOKENS: dlemmatize_sent}], obmit_tag)[0][
                TOKENS]
        return lemmatized_sent

    def _lemmatize_doc(self, in_doc, obmit_tag=False):
        """Tokenize (if needed), posdep-tag, then lemmatize a whole document."""
        # assuming input is a document
        if type(in_doc) == str:
            # in_doc is a raw string in this case
            in_doc = self._tokenize_doc(in_doc)
        in_doc = self._posdep_doc(in_doc)
        # deepcopy so the lemma model cannot mutate the caller's structure
        dlemmatize_doc = deepcopy(in_doc)
        lemmatized_doc = self._lemma_model[self._config.active_lang].predict(dlemmatize_doc, obmit_tag)
        return lemmatized_doc

    def _mwt_expand(self, tokenized_doc):
        """Expand multi-word tokens via the active language's MWT model."""
        expanded_doc = self._mwt_model[self._config.active_lang].predict(tokenized_doc)
        return expanded_doc

    def ner(self, input, is_sent=False):
        """Run named-entity recognition over *input*.

        Mirrors lemmatize()'s input handling (string / word list / sentence
        lists). Asserts that the active language is in `langwithner` before
        running. Returns TOKENS (sentence mode) or SENTENCES (document mode)
        plus LANG; raw-string inputs also carry TEXT.
        """
        if is_sent:
            assert is_string(input) or is_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of non-empty strings.'
            if is_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=' '.join(input))
                assert self.active_lang in langwithner, 'NER module is not available for "{}"'.format(self.active_lang)
                input = [{ID: k + 1, TEXT: w} for k, w in enumerate(input)]
                return {TOKENS: self._ner_sent(in_sent=input), LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                assert self.active_lang in langwithner, 'NER module is not available for "{}"'.format(self.active_lang)
                ori_text = deepcopy(input)
                return {TEXT: ori_text, TOKENS: self._ner_sent(in_sent=input), LANG: self.active_lang}
        else:
            assert is_string(input) or is_list_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of lists of non-empty strings.'
            if is_list_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text='\n'.join([' '.join(sent) for sent in input]))
                assert self.active_lang in langwithner, 'NER module is not available for "{}"'.format(self.active_lang)
                input = [{ID: sid + 1, TOKENS: [{ID: tid + 1, TEXT: w} for tid, w in enumerate(sent)]} for sid, sent in
                         enumerate(input)]
                return {SENTENCES: self._ner_doc(in_doc=input), LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                assert self.active_lang in langwithner, 'NER module is not available for "{}"'.format(self.active_lang)
                ori_text = deepcopy(input)
                return {TEXT: ori_text, SENTENCES: self._ner_doc(in_doc=input), LANG: self.active_lang}

    def _ner_sent(self, in_sent):
        """Tag a single sentence with NER labels and return its token list.

        The sentence is wrapped into a one-sentence document so the batched
        document path below can be reused unchanged.
        """
        # assuming input is a document
        if type(in_sent) == str:
            in_sent = self._tokenize_sent(in_sent)
        dner_doc = [{ID: 1, TOKENS: deepcopy(in_sent)}]
        sentences = [[t[TEXT] for t in sentence[TOKENS]] for sentence in dner_doc]
        test_set = NERDatasetLive(
            config=self._config,
            tokenized_sentences=sentences
        )
        test_set.numberize()
        # load ner adapter weights
        self._load_adapter_weights(model_name='ner')
        eval_batch_size = tbname2tagbatchsize.get(self._config.treebank_name, self._tagbatchsize)
        if self._config.embedding_name == 'xlm-roberta-large':
            # larger embedding model -> shrink the batch to fit memory
            eval_batch_size = int(eval_batch_size / 3)
        for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn):
            word_reprs, cls_reprs = self._embedding_layers.get_tagger_inputs(batch)
            pred_entity_labels = self._ner_model[self._config.active_lang].predict(batch, word_reprs)
            batch_size = len(batch.word_num)
            for bid in range(batch_size):
                sentid = batch.sent_index[bid]
                for i in range(batch.word_num[bid]):
                    wordid = batch.word_ids[bid][i]
                    # NER tag
                    dner_doc[sentid][TOKENS][wordid][NER] = pred_entity_labels[bid][i]
        torch.cuda.empty_cache()
        # unwrap the single sentence again
        return dner_doc[0][TOKENS]

    def _ner_doc(self, in_doc):
        """Tag a whole document with NER labels; returns the annotated copy."""
        # assuming input is a document
        if type(in_doc) == str:
            in_doc = self._tokenize_doc(in_doc)
        # deepcopy so predictions are written to a fresh structure
        dner_doc = deepcopy(in_doc)
        sentences = [[t[TEXT] for t in sentence[TOKENS]] for sentence in dner_doc]
        test_set = NERDatasetLive(
            config=self._config,
            tokenized_sentences=sentences
        )
        test_set.numberize()
        # load ner adapter weights
        self._load_adapter_weights(model_name='ner')
        eval_batch_size = tbname2tagbatchsize.get(self._config.treebank_name, self._tagbatchsize)
        if self._config.embedding_name == 'xlm-roberta-large':
            # larger embedding model -> shrink the batch to fit memory
            eval_batch_size = int(eval_batch_size / 3)
        for batch in DataLoader(test_set, batch_size=eval_batch_size, shuffle=False, collate_fn=test_set.collate_fn):
            word_reprs, cls_reprs = self._embedding_layers.get_tagger_inputs(batch)
            pred_entity_labels = self._ner_model[self._config.active_lang].predict(batch, word_reprs)
            batch_size = len(batch.word_num)
            for bid in range(batch_size):
                sentid = batch.sent_index[bid]
                for i in range(batch.word_num[bid]):
                    wordid = batch.word_ids[bid][i]
                    # NER tag
                    dner_doc[sentid][TOKENS][wordid][NER] = pred_entity_labels[bid][i]
        torch.cuda.empty_cache()
        return dner_doc

    def __call__(self, input, is_sent=False):
        """Run the full pipeline: posdep tagging, lemmatization, and — when the
        active language supports it — NER.

        Accepts the same input shapes as lemmatize()/ner(); returns the same
        dict layout (TOKENS/SENTENCES, optional TEXT, LANG).
        """
        if is_sent:
            assert is_string(input) or is_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of non-empty strings.'
            if is_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=' '.join(input))
                tokenized_sent = [{ID: k + 1, TEXT: w} for k, w in enumerate(input)]
                tagged_sent = self._posdep_sent(tokenized_sent)
                out = self._lemmatize_sent(tagged_sent)
                if self._config.active_lang in langwithner:
                    # ner if possible
                    out = self._ner_sent(out)
                final = {TOKENS: out, LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                ori_text = deepcopy(input)
                tagged_sent = self._posdep_sent(input)
                out = self._lemmatize_sent(tagged_sent)
                if self._config.active_lang in langwithner:
                    # ner if possible
                    out = self._ner_sent(out)
                final = {TEXT: ori_text, TOKENS: out, LANG: self.active_lang}
        else:
            assert is_string(input) or is_list_list_strings(
                input), 'Input must be one of the following:\n(i) A non-empty string.\n(ii) A list of lists of non-empty strings.'
            if is_list_list_strings(input):
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text='\n'.join([' '.join(sent) for sent in input]))
                input = [{ID: sid + 1, TOKENS: [{ID: tid + 1, TEXT: w} for tid, w in enumerate(sent)]} for sid, sent in
                         enumerate(input)]
                tagged_doc = self._posdep_doc(input)
                out = self._lemmatize_doc(tagged_doc)
                if self._config.active_lang in langwithner:
                    # ner if possible
                    out = self._ner_doc(out)
                final = {SENTENCES: out, LANG: self.active_lang}
            else:
                # switch to detected lang if auto mode is on
                if self.auto_mode:
                    self._detect_lang_and_switch(text=input)
                ori_text = deepcopy(input)
                tagged_doc = self._posdep_doc(in_doc=input)
                out = self._lemmatize_doc(tagged_doc)
                if self._config.active_lang in langwithner:
                    # ner if possible
                    out = self._ner_doc(out)
                final = {TEXT: ori_text, SENTENCES: out, LANG: self.active_lang}
        return final

    def _conllu_predict(self, text_fpath):
        """Run tokenize -> posdep -> lemmatize on the file at *text_fpath* and
        write the result as CoNLL-U to '<text_fpath>.pred'; returns that path.
        """
        print('Running the pipeline on device={}'.format(self._config.device))
        with open(text_fpath) as f:
            raw_text = f.read()
        print('Beginning tokenization')
        tokenized_doc = self._tokenize_doc(raw_text)
        print('Beginning pos tagging and dependency parsing')
        tagged_doc = self._posdep_doc(tokenized_doc)
        print('Beginning lemmatization')
        lemmatized_doc = self._lemmatize_doc(tagged_doc)
        conllu_doc = []
        for sentence in lemmatized_doc:
            conllu_sentence = []
            for token in sentence[TOKENS]:
                # single-word tokens have an int ID (or a 1-tuple); both branches
                # append the token itself — MWT tokens additionally append each
                # of their EXPANDED words after the surface token
                if type(token[ID]) == int or len(token[ID]) == 1:
                    conllu_sentence.append(token)
                else:
                    conllu_sentence.append(token)
                    for word in token[EXPANDED]:
                        conllu_sentence.append(word)
            conllu_doc.append(conllu_sentence)
        pred_lemma_fpath = text_fpath + '.pred'
        CoNLL.dict2conll(conllu_doc, pred_lemma_fpath)
        return pred_lemma_fpath
48.226465
166
0.565936
6,399
54,303
4.514455
0.058134
0.036693
0.017447
0.02077
0.825845
0.807844
0.788978
0.777209
0.768174
0.760454
0
0.007422
0.342449
54,303
1,125
167
48.269333
0.801608
0.04904
0
0.732584
0
0.010112
0.046035
0.000935
0
0
0
0
0.033708
1
0.030337
false
0
0.017978
0
0.088764
0.023596
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
4c196fa1c178a8046261245a00e427b5e1eecea2
3,490
py
Python
flatland/database/population/drawing/closed_shape_fill_instances.py
erik-soederholm/flatland-model-diagram-editor
088e27cded9eca2cacba2c6168c03caf4b43ef72
[ "MIT" ]
10
2021-01-03T16:47:34.000Z
2022-03-30T18:47:07.000Z
flatland/database/population/drawing/closed_shape_fill_instances.py
modelint/flatland-model-diagram-editor
088e27cded9eca2cacba2c6168c03caf4b43ef72
[ "MIT" ]
91
2021-01-09T02:14:13.000Z
2022-02-24T10:24:10.000Z
flatland/database/population/drawing/closed_shape_fill_instances.py
erik-soederholm/flatland-model-diagram-editor
088e27cded9eca2cacba2c6168c03caf4b43ef72
[ "MIT" ]
1
2021-01-13T22:13:19.000Z
2021-01-13T22:13:19.000Z
""" closed_shape_fill_instances.py """ population = [ # Title block boxes {'Asset': 'Block border', 'Presentation': 'default', 'Drawing type': 'OS Engineer large frame', 'Fill': 'white'}, {'Asset': 'Block border', 'Presentation': 'default', 'Drawing type': 'OS Engineer medium frame', 'Fill': 'white'}, {'Asset': 'Block border', 'Presentation': 'default', 'Drawing type': 'TRI MBSE large frame', 'Fill': 'white'}, {'Asset': 'Block border', 'Presentation': 'default', 'Drawing type': 'TRI MBSE medium frame', 'Fill': 'white'}, # Starr/default symbols {'Asset': 'solid arrow', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'black'}, {'Asset': 'hollow arrow', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'gen arrow', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'class name compartment', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'class attribute compartment', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'class method compartment', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'imported class name compartment', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, {'Asset': 'imported class attribute compartment', 'Presentation': 'default', 'Drawing type': 'Starr class diagram', 'Fill': 'white'}, # Shlaer-Mellor/default symbols {'Asset': 'class name compartment', 'Presentation': 'default', 'Drawing type': 'Shlaer-Mellor class diagram', 'Fill': 'white'}, {'Asset': 'class attribute compartment', 'Presentation': 'default', 'Drawing type': 'Shlaer-Mellor class diagram', 'Fill': 'white'}, {'Asset': 'class method compartment', 'Presentation': 'default', 'Drawing type': 'Shlaer-Mellor class diagram', 'Fill': 'white'}, {'Asset': 'imported class compartment', 'Presentation': 'default', 
'Drawing type': 'Shlaer-Mellor class diagram', 'Fill': 'white'}, # xUML/default {'Asset': 'class name compartment', 'Presentation': 'default', 'Drawing type': 'xUML class diagram', 'Fill': 'white'}, {'Asset': 'class attribute compartment', 'Presentation': 'default', 'Drawing type': 'xUML class diagram', 'Fill': 'white'}, {'Asset': 'class method compartment', 'Presentation': 'default', 'Drawing type': 'xUML class diagram', 'Fill': 'white'}, {'Asset': 'imported class compartment', 'Presentation': 'default', 'Drawing type': 'xUML class diagram', 'Fill': 'white'}, {'Asset': 'state name compartment', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'white'}, {'Asset': 'state name only compartment', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'white'}, {'Asset': 'state activity compartment', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'white'}, {'Asset': 'solid arrow', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'black'}, {'Asset': 'solid small dot', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'black'}, {'Asset': 'hollow large circle', 'Presentation': 'default', 'Drawing type': 'xUML state machine diagram', 'Fill': 'white'}, ]
68.431373
128
0.646132
365
3,490
6.169863
0.128767
0.219361
0.300178
0.346359
0.923623
0.913854
0.909858
0.899201
0.886767
0.832593
0
0
0.153868
3,490
50
129
69.8
0.762614
0.032665
0
0.263158
0
0
0.678752
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10