Schema (one row per source file; 113 columns, in this order):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

In the example rows below, the two 41-column `qsc_*` blocks are listed as values only, in the column order of this table: first the `*_quality_signal` block (`qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print_quality_signal`), then the unsuffixed block (`qsc_code_num_words` through `qsc_codepython_frac_lines_print`).
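Assuming the rows below are available as a local dump (the file name `code_rows.parquet` and the filter thresholds are illustrative assumptions, not part of the dataset), a minimal sketch of loading them and filtering on the quality signals:

```python
import pandas as pd

# Hypothetical Parquet dump of rows with the schema above.
df = pd.read_parquet("code_rows.parquet")

# Example filter: Python files that are not dominated by duplicated lines
# and have a plausible mean line length.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.5)
    & (df["avg_line_length"].between(10, 100))
)
print(df.loc[mask, ["hexsha", "max_stars_repo_name", "size", "hits"]].head())
```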
**Row 1**

- hexsha: `9225b3c1ee55f4d6994467863ce60bcc9130be6c` · size: 54,574 · ext: `py` · lang: Python
- path (same for the max_stars, max_issues, and max_forks blocks): `src/genie/libs/parser/junos/tests/ShowChassisEnvironment/cli/equal/golden_output_expected.py`
- repo (all three blocks): `balmasea/genieparser` @ head `d1e71a96dfb081e0a8591707b9d4872decd5d9d3`, licenses `["Apache-2.0"]`
- max_stars_count: 204 (events 2018-06-27T00:55:27.000Z → 2022-03-06T21:12:18.000Z)
- max_issues_count: 468 (events 2018-06-19T00:33:18.000Z → 2022-03-31T23:23:35.000Z)
- max_forks_count: 309 (events 2019-01-16T20:21:07.000Z → 2022-03-30T12:56:41.000Z)
- content:

```python
expected_output = {
'environment-information': {
'environment-item': [{
'class': 'Temp',
'name': 'PSM 0',
'status': 'OK',
'temperature': {
'#text': '25 '
'degrees '
'C '
'/ '
'77 '
'degrees '
'F',
'@junos:celsius': '25'
}
}, {
'class': 'Temp',
'name': 'PSM 1',
'status': 'OK',
'temperature': {
'#text': '24 '
'degrees '
'C '
'/ '
'75 '
'degrees '
'F',
'@junos:celsius': '24'
}
}, {
'class': 'Temp',
'name': 'PSM 2',
'status': 'OK',
'temperature': {
'#text': '24 '
'degrees '
'C '
'/ '
'75 '
'degrees '
'F',
'@junos:celsius': '24'
}
}, {
'class': 'Temp',
'name': 'PSM 3',
'status': 'OK',
'temperature': {
'#text': '23 '
'degrees '
'C '
'/ '
'73 '
'degrees '
'F',
'@junos:celsius': '23'
}
}, {
'class': 'Temp',
'name': 'PSM 4',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 5',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 6',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 7',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 8',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 9',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'PSM 10',
'status': 'OK',
'temperature': {
'#text': '30 '
'degrees '
'C '
'/ '
'86 '
'degrees '
'F',
'@junos:celsius': '30'
}
}, {
'class': 'Temp',
'name': 'PSM 11',
'status': 'OK',
'temperature': {
'#text': '30 '
'degrees '
'C '
'/ '
'86 '
'degrees '
'F',
'@junos:celsius': '30'
}
}, {
'class': 'Temp',
'name': 'PSM 12',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 13',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 14',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 15',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 16',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PSM 17',
'status': 'Check'
}, {
'class': 'Temp',
'name': 'PDM 0',
'status': 'OK'
}, {
'class': 'Temp',
'name': 'PDM 1',
'status': 'OK'
}, {
'class': 'Temp',
'name': 'PDM 2',
'status': 'OK'
}, {
'class': 'Temp',
'name': 'PDM 3',
'status': 'OK'
}, {
'class': 'Temp',
'name': 'CB 0 IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '39 '
'degrees '
'C '
'/ '
'102 '
'degrees '
'F',
'@junos:celsius': '39'
}
}, {
'class': 'Temp',
'name': 'CB 0 IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '36 '
'degrees '
'C '
'/ '
'96 '
'degrees '
'F',
'@junos:celsius': '36'
}
}, {
'class': 'Temp',
'name': 'CB 0 IntakeC-Zone0',
'status': 'OK',
'temperature': {
'#text': '51 '
'degrees '
'C '
'/ '
'123 '
'degrees '
'F',
'@junos:celsius': '51'
}
}, {
'class': 'Temp',
'name': 'CB 0 '
'ExhaustA-Zone0',
'status': 'OK',
'temperature': {
'#text': '40 '
'degrees '
'C '
'/ '
'104 '
'degrees '
'F',
'@junos:celsius': '40'
}
}, {
'class': 'Temp',
'name': 'CB 0 '
'ExhaustB-Zone1',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'CB 0 TCBC-Zone0',
'status': 'OK',
'temperature': {
'#text': '45 '
'degrees '
'C '
'/ '
'113 '
'degrees '
'F',
'@junos:celsius': '45'
}
}, {
'class': 'Temp',
'name': 'CB 1 IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'CB 1 IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'CB 1 IntakeC-Zone0',
'status': 'OK',
'temperature': {
'#text': '33 '
'degrees '
'C '
'/ '
'91 '
'degrees '
'F',
'@junos:celsius': '33'
}
}, {
'class': 'Temp',
'name': 'CB 1 '
'ExhaustA-Zone0',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'CB 1 '
'ExhaustB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'CB 1 TCBC-Zone0',
'status': 'OK',
'temperature': {
'#text': '39 '
'degrees '
'C '
'/ '
'102 '
'degrees '
'F',
'@junos:celsius': '39'
}
}, {
'class': 'Temp',
'name': 'SPMB 0 Intake',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SPMB 1 Intake',
'status': 'OK',
'temperature': {
'#text': '33 '
'degrees '
'C '
'/ '
'91 '
'degrees '
'F',
'@junos:celsius': '33'
}
}, {
'class': 'Temp',
'name': 'Routing Engine 0',
'status': 'OK',
'temperature': {
'#text': '43 '
'degrees '
'C '
'/ '
'109 '
'degrees '
'F',
'@junos:celsius': '43'
}
}, {
'class': 'Temp',
'name': 'Routing Engine 0 '
'CPU',
'status': 'OK',
'temperature': {
'#text': '39 '
'degrees '
'C '
'/ '
'102 '
'degrees '
'F',
'@junos:celsius': '39'
}
}, {
'class': 'Temp',
'name': 'Routing Engine 1',
'status': 'OK',
'temperature': {
'#text': '40 '
'degrees '
'C '
'/ '
'104 '
'degrees '
'F',
'@junos:celsius': '40'
}
}, {
'class': 'Temp',
'name': 'Routing Engine 1 '
'CPU',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 0 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '37 '
'degrees '
'C '
'/ '
'98 '
'degrees '
'F',
'@junos:celsius': '37'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '45 '
'degrees '
'C '
'/ '
'113 '
'degrees '
'F',
'@junos:celsius': '45'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '36 '
'degrees '
'C '
'/ '
'96 '
'degrees '
'F',
'@junos:celsius': '36'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '63 '
'degrees '
'C '
'/ '
'145 '
'degrees '
'F',
'@junos:celsius': '63'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '55 '
'degrees '
'C '
'/ '
'131 '
'degrees '
'F',
'@junos:celsius': '55'
}
}, {
'class': 'Temp',
'name': 'SFB 0 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'SFB 1 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '63 '
'degrees '
'C '
'/ '
'145 '
'degrees '
'F',
'@junos:celsius': '63'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 1 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'SFB 2 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '30 '
'degrees '
'C '
'/ '
'86 '
'degrees '
'F',
'@junos:celsius': '30'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '60 '
'degrees '
'C '
'/ '
'140 '
'degrees '
'F',
'@junos:celsius': '60'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 2 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '56 '
'degrees '
'C '
'/ '
'132 '
'degrees '
'F',
'@junos:celsius': '56'
}
}, {
'class': 'Temp',
'name': 'SFB 3 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '61 '
'degrees '
'C '
'/ '
'141 '
'degrees '
'F',
'@junos:celsius': '61'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 3 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'SFB 4 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '64 '
'degrees '
'C '
'/ '
'147 '
'degrees '
'F',
'@junos:celsius': '64'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 4 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'SFB 5 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '41 '
'degrees '
'C '
'/ '
'105 '
'degrees '
'F',
'@junos:celsius': '41'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '31 '
'degrees '
'C '
'/ '
'87 '
'degrees '
'F',
'@junos:celsius': '31'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '63 '
'degrees '
'C '
'/ '
'145 '
'degrees '
'F',
'@junos:celsius': '63'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 5 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'SFB 6 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '34 '
'degrees '
'C '
'/ '
'93 '
'degrees '
'F',
'@junos:celsius': '34'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '62 '
'degrees '
'C '
'/ '
'143 '
'degrees '
'F',
'@junos:celsius': '62'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'SFB 6 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '49 '
'degrees '
'C '
'/ '
'120 '
'degrees '
'F',
'@junos:celsius': '49'
}
}, {
'class': 'Temp',
'name': 'SFB 7 Intake-Zone0',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'Exhaust-Zone1',
'status': 'OK',
'temperature': {
'#text': '43 '
'degrees '
'C '
'/ '
'109 '
'degrees '
'F',
'@junos:celsius': '43'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'IntakeA-Zone0',
'status': 'OK',
'temperature': {
'#text': '31 '
'degrees '
'C '
'/ '
'87 '
'degrees '
'F',
'@junos:celsius': '31'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'IntakeB-Zone1',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'Exhaust-Zone0',
'status': 'OK',
'temperature': {
'#text': '35 '
'degrees '
'C '
'/ '
'95 '
'degrees '
'F',
'@junos:celsius': '35'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'SFB-XF2-Zone1',
'status': 'OK',
'temperature': {
'#text': '65 '
'degrees '
'C '
'/ '
'149 '
'degrees '
'F',
'@junos:celsius': '65'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'SFB-XF1-Zone0',
'status': 'OK',
'temperature': {
'#text': '56 '
'degrees '
'C '
'/ '
'132 '
'degrees '
'F',
'@junos:celsius': '56'
}
}, {
'class': 'Temp',
'name': 'SFB 7 '
'SFB-XF0-Zone0',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 Intake',
'status': 'OK',
'temperature': {
'#text': '29 '
'degrees '
'C '
'/ '
'84 '
'degrees '
'F',
'@junos:celsius': '29'
}
}, {
'class': 'Temp',
'name': 'FPC 0 Exhaust A',
'status': 'OK',
'temperature': {
'#text': '53 '
'degrees '
'C '
'/ '
'127 '
'degrees '
'F',
'@junos:celsius': '53'
}
}, {
'class': 'Temp',
'name': 'FPC 0 Exhaust B',
'status': 'OK',
'temperature': {
'#text': '54 '
'degrees '
'C '
'/ '
'129 '
'degrees '
'F',
'@junos:celsius': '54'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 TSen',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 Chip',
'status': 'OK',
'temperature': {
'#text': '63 '
'degrees '
'C '
'/ '
'145 '
'degrees '
'F',
'@junos:celsius': '63'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 XR2 0 '
'TSen',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 XR2 0 '
'Chip',
'status': 'OK',
'temperature': {
'#text': '80 '
'degrees '
'C '
'/ '
'176 '
'degrees '
'F',
'@junos:celsius': '80'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 XR2 1 '
'TSen',
'status': 'OK',
'temperature': {
'#text': '50 '
'degrees '
'C '
'/ '
'122 '
'degrees '
'F',
'@junos:celsius': '50'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 0 XR2 1 '
'Chip',
'status': 'OK',
'temperature': {
'#text': '80 '
'degrees '
'C '
'/ '
'176 '
'degrees '
'F',
'@junos:celsius': '80'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 TSen',
'status': 'OK',
'temperature': {
'#text': '36 '
'degrees '
'C '
'/ '
'96 '
'degrees '
'F',
'@junos:celsius': '36'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 Chip',
'status': 'OK',
'temperature': {
'#text': '44 '
'degrees '
'C '
'/ '
'111 '
'degrees '
'F',
'@junos:celsius': '44'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 XR2 0 '
'TSen',
'status': 'OK',
'temperature': {
'#text': '36 '
'degrees '
'C '
'/ '
'96 '
'degrees '
'F',
'@junos:celsius': '36'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 XR2 0 '
'Chip',
'status': 'OK',
'temperature': {
'#text': '60 '
'degrees '
'C '
'/ '
'140 '
'degrees '
'F',
'@junos:celsius': '60'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 XR2 1 '
'TSen',
'status': 'OK',
'temperature': {
'#text': '36 '
'degrees '
'C '
'/ '
'96 '
'degrees '
'F',
'@junos:celsius': '36'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XL 1 XR2 1 '
'Chip',
'status': 'OK',
'temperature': {
'#text': '59 '
'degrees '
'C '
'/ '
'138 '
'degrees '
'F',
'@junos:celsius': '59'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 0 TSen',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 0 Chip',
'status': 'OK',
'temperature': {
'#text': '62 '
'degrees '
'C '
'/ '
'143 '
'degrees '
'F',
'@junos:celsius': '62'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 1 TSen',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 1 Chip',
'status': 'OK',
'temperature': {
'#text': '57 '
'degrees '
'C '
'/ '
'134 '
'degrees '
'F',
'@junos:celsius': '57'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 2 TSen',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 2 Chip',
'status': 'OK',
'temperature': {
'#text': '51 '
'degrees '
'C '
'/ '
'123 '
'degrees '
'F',
'@junos:celsius': '51'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 3 TSen',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 XM 3 Chip',
'status': 'OK',
'temperature': {
'#text': '45 '
'degrees '
'C '
'/ '
'113 '
'degrees '
'F',
'@junos:celsius': '45'
}
}, {
'class': 'Temp',
'name': 'FPC 0 PCIe Switch '
'TSen',
'status': 'OK',
'temperature': {
'#text': '52 '
'degrees '
'C '
'/ '
'125 '
'degrees '
'F',
'@junos:celsius': '52'
}
}, {
'class': 'Temp',
'name': 'FPC 0 PCIe Switch '
'Chip',
'status': 'OK',
'temperature': {
'#text': '30 '
'degrees '
'C '
'/ '
'86 '
'degrees '
'F',
'@junos:celsius': '30'
}
}, {
'class': 'Temp',
'name': 'FPC 9 Intake',
'status': 'OK',
'temperature': {
'#text': '31 '
'degrees '
'C '
'/ '
'87 '
'degrees '
'F',
'@junos:celsius': '31'
}
}, {
'class': 'Temp',
'name': 'FPC 9 Exhaust A',
'status': 'OK',
'temperature': {
'#text': '48 '
'degrees '
'C '
'/ '
'118 '
'degrees '
'F',
'@junos:celsius': '48'
}
}, {
'class': 'Temp',
'name': 'FPC 9 Exhaust B',
'status': 'OK',
'temperature': {
'#text': '41 '
'degrees '
'C '
'/ '
'105 '
'degrees '
'F',
'@junos:celsius': '41'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 0 TCAM '
'TSen',
'status': 'OK',
'temperature': {
'#text': '46 '
'degrees '
'C '
'/ '
'114 '
'degrees '
'F',
'@junos:celsius': '46'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 0 TCAM '
'Chip',
'status': 'OK',
'temperature': {
'#text': '55 '
'degrees '
'C '
'/ '
'131 '
'degrees '
'F',
'@junos:celsius': '55'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 0 TSen',
'status': 'OK',
'temperature': {
'#text': '46 '
'degrees '
'C '
'/ '
'114 '
'degrees '
'F',
'@junos:celsius': '46'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 0 Chip',
'status': 'OK',
'temperature': {
'#text': '55 '
'degrees '
'C '
'/ '
'131 '
'degrees '
'F',
'@junos:celsius': '55'
}
}, {
'class': 'Temp',
'name': 'FPC 9 MQ 0 TSen',
'status': 'OK',
'temperature': {
'#text': '46 '
'degrees '
'C '
'/ '
'114 '
'degrees '
'F',
'@junos:celsius': '46'
}
}, {
'class': 'Temp',
'name': 'FPC 9 MQ 0 Chip',
'status': 'OK',
'temperature': {
'#text': '57 '
'degrees '
'C '
'/ '
'134 '
'degrees '
'F',
'@junos:celsius': '57'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 1 TCAM '
'TSen',
'status': 'OK',
'temperature': {
'#text': '41 '
'degrees '
'C '
'/ '
'105 '
'degrees '
'F',
'@junos:celsius': '41'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 1 TCAM '
'Chip',
'status': 'OK',
'temperature': {
'#text': '46 '
'degrees '
'C '
'/ '
'114 '
'degrees '
'F',
'@junos:celsius': '46'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 1 TSen',
'status': 'OK',
'temperature': {
'#text': '41 '
'degrees '
'C '
'/ '
'105 '
'degrees '
'F',
'@junos:celsius': '41'
}
}, {
'class': 'Temp',
'name': 'FPC 9 LU 1 Chip',
'status': 'OK',
'temperature': {
'#text': '47 '
'degrees '
'C '
'/ '
'116 '
'degrees '
'F',
'@junos:celsius': '47'
}
}, {
'class': 'Temp',
'name': 'FPC 9 MQ 1 TSen',
'status': 'OK',
'temperature': {
'#text': '41 '
'degrees '
'C '
'/ '
'105 '
'degrees '
'F',
'@junos:celsius': '41'
}
}, {
'class': 'Temp',
'name': 'FPC 9 MQ 1 Chip',
'status': 'OK',
'temperature': {
'#text': '47 '
'degrees '
'C '
'/ '
'116 '
'degrees '
'F',
'@junos:celsius': '47'
}
}, {
'class': 'Temp',
'name': 'ADC 9 Intake',
'status': 'OK',
'temperature': {
'#text': '32 '
'degrees '
'C '
'/ '
'89 '
'degrees '
'F',
'@junos:celsius': '32'
}
}, {
'class': 'Temp',
'name': 'ADC 9 Exhaust',
'status': 'OK',
'temperature': {
'#text': '42 '
'degrees '
'C '
'/ '
'107 '
'degrees '
'F',
'@junos:celsius': '42'
}
}, {
'class': 'Temp',
'name': 'ADC 9 ADC-XF1',
'status': 'OK',
'temperature': {
'#text': '49 '
'degrees '
'C '
'/ '
'120 '
'degrees '
'F',
'@junos:celsius': '49'
}
}, {
'class': 'Temp',
'name': 'ADC 9 ADC-XF0',
'status': 'OK',
'temperature': {
'#text': '59 '
'degrees '
'C '
'/ '
'138 '
'degrees '
'F',
'@junos:celsius': '59'
}
}, {
'class': 'Fans',
'comment': '2760 RPM',
'name': 'Fan Tray 0 Fan 1',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 0 Fan 2',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 0 Fan 3',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 0 Fan 4',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 0 Fan 5',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 0 Fan 6',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 1 Fan 1',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 1 Fan 2',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 1 Fan 3',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 1 Fan 4',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 1 Fan 5',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 1 Fan 6',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 2 Fan 1',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 2 Fan 2',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 2 Fan 3',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 2 Fan 4',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 2 Fan 5',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 2 Fan 6',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 3 Fan 1',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2400 RPM',
'name': 'Fan Tray 3 Fan 2',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 3 Fan 3',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 3 Fan 4',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2640 RPM',
'name': 'Fan Tray 3 Fan 5',
'status': 'OK'
}, {
'class': 'Fans',
'comment': '2520 RPM',
'name': 'Fan Tray 3 Fan 6',
'status': 'OK'
}]
}
}
```

- avg_line_length: 25.693974 · max_line_length: 41 · alphanum_fraction: 0.234727
- `qsc_*_quality_signal` block (41 values, schema order): 3,139 · 54,574 · 4.080599 · 0.050335 · 0.100554 · 0.150207 · 0.238816 · 0.95956 · 0.946678 · 0.874229 · 0.847841 · 0.846202 · 0.842064 · 0 · 0.062942 · 0.614267 · 54,574 · 2,123 · 42 · 25.706076 · 0.545532 · 0 · 0 · 0.874706 · 0 · 0 · 0.269671 · 0.000421 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · false · 0 · 0 · 0 · 0 · 0
- unsuffixed `qsc_*` block (41 values, schema order): 0 · 0 · 0 · null · 0 · 0 · 1 · 1 · 1 · 1 · 1 · 1 · 1 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0
- effective: `"0"` · hits: 9
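Several of the header statistics are simple functions of `content`, which makes rows like this easy to sanity-check. A minimal sketch of how such statistics might be recomputed (the dataset's exact definitions are not given here, so treat these as approximations):

```python
def basic_stats(content: str) -> dict:
    """Recompute a few simple per-file statistics from a row's `content`."""
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / len(content),
        # Presumed analogue of qsc_code_frac_chars_whitespace_quality_signal.
        "frac_chars_whitespace": sum(c.isspace() for c in content) / len(content),
    }
```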
**Row 2**

- hexsha: `925cbff7b4fe629ab439be60df8604f24ff66bc7` · size: 130 · ext: `py` · lang: Python
- path (all three blocks): `kbc_pul/experiments_utils/file_utils.py`
- repo (all three blocks): `ML-KULeuven/KBC-as-PU-Learning` @ head `a00f606bd40ca06af0a5627e65a4582859976918`, licenses `["Apache-2.0"]`
- max_stars_count: 4 (events 2021-12-14T16:13:47.000Z → 2022-01-21T13:14:14.000Z)
- max_issues_count: null · max_forks_count: null (all issue/fork event datetimes null)
- content:

```python
import os


def print_file_exists(filename: str) -> None:
    print(f"? file exists: {filename}\n-> {os.path.exists(filename)}")
```

- avg_line_length: 18.571429 · max_line_length: 70 · alphanum_fraction: 0.676923
- `qsc_*_quality_signal` block (41 values, schema order): 19 · 130 · 4.526316 · 0.631579 · 0.488372 · 0.418605 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0.146154 · 130 · 6 · 71 · 21.666667 · 0.774775 · 0 · 0 · 0 · 0 · 0 · 0.434109 · 0.20155 · 0 · 0 · 0 · 0 · 0 · 1 · 0.333333 · false · 0 · 0.333333 · 0 · 0.666667 · 0.666667
- unsuffixed `qsc_*` block (41 values, schema order): 1 · 0 · 0 · null · 1 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 1 · 0 · 0 · 1
- effective: `"0"` · hits: 7
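The helper in this row can be exercised directly; for instance (assuming the `kbc_pul` package from the row's repo is on the import path):

```python
from kbc_pul.experiments_utils.file_utils import print_file_exists

print_file_exists("/tmp/does_not_exist.txt")
# ? file exists: /tmp/does_not_exist.txt
# -> False
```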
**Row 3**

- hexsha: `927f4c79aa83e7fff26f8f45893a1eaf912e2026` · size: 58 · ext: `py` · lang: Python
- path (all three blocks): `json_schema_checker/validators/__init__.py`
- repo (all three blocks): `zorgulle/json_schema_checker` @ head `20cac68f899528619e5059f0e1fbee0a0f7219d6`, licenses `["MIT"]`
- star/issue/fork counts and event datetimes: null for all three blocks
- content:

```python
from .validators import Int
from .validators import String
```

- avg_line_length: 29 · max_line_length: 30 · alphanum_fraction: 0.844828
- `qsc_*_quality_signal` block (41 values, schema order): 8 · 58 · 6.125 · 0.625 · 0.571429 · 0.816327 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0.12069 · 58 · 2 · 30 · 29 · 0.960784 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · true · 0 · 1 · 0 · 1 · 0
- unsuffixed `qsc_*` block (41 values, schema order): 1 · 0 · 0 · null · 1 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 1 · 0 · 1 · 0
- effective: `"0"` · hits: 7
**Row 4**

- hexsha: `929e8a325b9be258a38a47e650d3a9382c02adaa` · size: 15,486 · ext: `py` · lang: Python
- path (all three blocks): `regionsSP/source/summary.py`
- repo (all three blocks): `abdelhadisamir/covid-19-SEIAR` @ head `187afb1ad4dccb1a4544b54eb7cda3d61d2c601f`, licenses `["MIT"]`
- max_stars_count: 2 (events 2020-05-12T07:32:42.000Z → 2021-07-26T09:41:17.000Z)
- max_issues_count: null · max_forks_count: null (all issue/fork event datetimes null)
- content (indentation of the `if` bodies was lost in this extract):

```python
if districtRegion1=="DRS 05 - Barretos":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 01 - Grande São Paulo":
date="2020-03-15"
#initial condition for susceptible
s0=280.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=80
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=1500
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of infected
ratioRecovered=0.1
#weigth for fitting data
weigthCases=0.6
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 04 - Baixada Santista":
date="2020-04-01"
#initial condition for susceptible
s0=8.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=150
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 06 - Bauru":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 17 - Taubaté":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=17
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=2
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 06 - Bauru":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 13 - Ribeirão Preto":
date="2020-03-25"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=5
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.3
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 02 - Araçatuba":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 09 - Marília":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 07 - Campinas":
date="2020-04-01"
#initial condition for susceptible
s0=20.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=40
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.5
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 11 - Presidente Prudente":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 10 - Piracicaba":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 12 - Registro":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 14 - São João da Boa Vista":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 15 - São José do Rio Preto":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 14 - São João da Boa Vista":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 16 - Sorocaba":
date="2020-04-01"
#initial condition for susceptible
s0=1.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 03 - Araraquara":
date="2020-03-25"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=0
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 03 - Araraquara":
date="2020-03-25"
#initial condition for susceptible
s0=2.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=0
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.5
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
```

- avg_line_length: 32.465409 · max_line_length: 84 · alphanum_fraction: 0.596216
- `qsc_*_quality_signal` block (41 values, schema order): 1,885 · 15,486 · 4.888064 · 0.057294 · 0.19796 · 0.235077 · 0.138919 · 0.973084 · 0.969611 · 0.969611 · 0.96603 · 0.96603 · 0.951704 · 0 · 0.074425 · 0.345796 · 15,486 · 476 · 85 · 32.533613 · 0.835061 · 0 · 0 · 0.882591 · 0 · 0 · 0.079251 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · null · null · 0 · 0 · null · null · 0
- unsuffixed `qsc_*` block (41 values, schema order): 0 · 0 · 0 · null · 0 · 1 · 0 · 1 · 1 · 1 · 1 · 1 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0
- effective: `"0"` · hits: 9
**Row 5**

- hexsha: `92adf2c4c13c901877b09563e05bc3de942a3158` · size: 4,608 · ext: `py` · lang: Python
- path (all three blocks): `chainer_bcnn/functions/loss/noised_cross_entropy.py`
- max_stars: repo `yuta-hi/bayesian_unet` @ head `cce1dbd75fad9cc29b77eb1c76b33c6a3eb0ffa6`, licenses `["MIT"]`, count 36 (events 2019-12-04T02:09:25.000Z → 2022-03-31T07:18:40.000Z)
- max_issues: repo `keisuke-uemura/bayesian_unet` @ same head, licenses `["MIT"]`, count 2 (events 2019-12-03T06:35:07.000Z → 2020-06-14T23:14:13.000Z)
- max_forks: repo `keisuke-uemura/bayesian_unet` @ same head, licenses `["MIT"]`, count 8 (events 2020-12-07T03:43:22.000Z → 2022-02-02T03:39:40.000Z)
- content:

```python
from __future__ import absolute_import

from chainer import backend
from chainer import functions as F
from chainer.functions import sigmoid_cross_entropy
from chainer.functions import softmax_cross_entropy

from .sigmoid_soft_cross_entropy import sigmoid_soft_cross_entropy


def noised_softmax_cross_entropy(y, t, mc_iteration,
                                 normalize=True, cache_score=True, class_weight=None,
                                 ignore_label=-1, reduce='mean', enable_double_backprop=False):
    """ Softmax Cross-entropy for aleatoric uncertainty estimates.
    See: https://arxiv.org/pdf/1703.04977.pdf

    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.

    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))

    logits, log_std = y
    assert logits.shape[0] == log_std.shape[0]
    assert log_std.shape[1] in (logits.shape[1], 1)
    assert logits.shape[2:] == log_std.shape[2:]

    xp = backend.get_array_module(t)

    # std = F.sqrt(F.exp(log_var))
    std = F.exp(log_std)

    loss = 0.
    for _ in range(mc_iteration):
        noise = std * xp.random.normal(0., 1., std.shape)
        loss += softmax_cross_entropy(logits + noise, t,
                                      normalize=False,
                                      cache_score=cache_score,
                                      class_weight=class_weight,
                                      ignore_label=ignore_label,
                                      reduce='no',
                                      enable_double_backprop=enable_double_backprop)

    if not reduce == 'mean':
        return loss

    if normalize:
        count = loss.size * mc_iteration
    else:
        count = max(1, len(loss)) * mc_iteration

    return F.sum(loss) / count


def noised_sigmoid_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
    """ Sigmoid Cross-entropy for aleatoric uncertainty estimates.

    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.

    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))

    logits, log_std = y
    assert logits.shape[0] == log_std.shape[0]
    assert log_std.shape[1] in (logits.shape[1], 1)
    assert logits.shape[2:] == log_std.shape[2:]
    assert logits.shape == t.shape

    xp = backend.get_array_module(t)

    # std = F.sqrt(F.exp(log_var))
    std = F.exp(log_std)

    loss = 0.
    for _ in range(mc_iteration):
        noise = std * xp.random.normal(0., 1., std.shape)
        loss += sigmoid_cross_entropy(logits + noise, t,
                                      normalize=False,
                                      reduce='no')

    if not reduce == 'mean':
        return loss

    if normalize:
        count = loss.size * mc_iteration
    else:
        count = max(1, len(loss)) * mc_iteration

    return F.sum(loss) / count


def noised_sigmoid_soft_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
    """ Sigmoid Soft Cross-entropy for aleatoric uncertainty estimates.

    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.

    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))

    logits, log_std = y
    assert logits.shape == log_std.shape
    assert logits.shape == t.shape

    xp = backend.get_array_module(t)

    # std = F.sqrt(F.exp(log_var))
    std = F.exp(log_std)

    loss = 0.
    for _ in range(mc_iteration):
        noise = std * xp.random.normal(0., 1., std.shape)
        loss += sigmoid_soft_cross_entropy(logits + noise, t,
                                           normalize=False,
                                           reduce='no')

    if not reduce == 'mean':
        return loss

    if normalize:
        count = loss.size * mc_iteration
    else:
        count = max(1, len(loss)) * mc_iteration

    return F.sum(loss) / count
```

- avg_line_length: 30.516556 · max_line_length: 95 · alphanum_fraction: 0.591146
- `qsc_*_quality_signal` block (41 values, schema order): 575 · 4,608 · 4.586087 · 0.175652 · 0.062571 · 0.045127 · 0.04361 · 0.810011 · 0.810011 · 0.793326 · 0.778915 · 0.764505 · 0.764505 · 0 · 0.011229 · 0.304253 · 4,608 · 150 · 96 · 30.72 · 0.811291 · 0.269965 · 0 · 0.733333 · 0 · 0 · 0.009294 · 0 · 0 · 0 · 0 · 0 · 0.16 · 1 · 0.04 · false · 0 · 0.08 · 0 · 0.2 · 0
- unsuffixed `qsc_*` block (41 values, schema order): 0 · 0 · 0 · null · 0 · 0 · 1 · 1 · 1 · 1 · 1 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0
- effective: `"0"` · hits: 7
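For reference, the losses in this row take `y = [logits, log_std]` and integer targets, per their own asserts. A minimal usage sketch (assumes Chainer is installed and the `chainer_bcnn` package from this row is importable; shapes are illustrative):

```python
import numpy as np
import chainer

from chainer_bcnn.functions.loss.noised_cross_entropy import (
    noised_softmax_cross_entropy,
)

logits = chainer.Variable(np.random.randn(8, 5).astype(np.float32))  # (N, C)
log_std = chainer.Variable(np.zeros((8, 1), dtype=np.float32))       # (N, 1)
t = np.random.randint(0, 5, size=(8,)).astype(np.int32)              # labels

# Averages the softmax cross-entropy over 10 noisy draws of the logits.
loss = noised_softmax_cross_entropy([logits, log_std], t, mc_iteration=10)
print(loss)  # scalar chainer.Variable
```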
**Row 6**

- hexsha: `2b832fc4edfc9eb726a140832521968248c37fd7` · size: 1,223 · ext: `py` · lang: Python
- path (all three blocks): `chapter_2/name_cases.py`
- repo (all three blocks): `superbe/PythonCrashCourse` @ head `c8781f68b0e9e68e54d48cce5224ecb6a5625ae2`, licenses `["MIT"]`
- star/issue/fork counts and event datetimes: null for all three blocks
- content (the Russian comments read "Exercise 3" through "Exercise 7"):

```python
name = 'eric Pearson'
# Упражнение 3.
message = f'Hello {name}, would you like to learn some Python today?'
print(message)
# Упражнение 4.
message = f'Hello {name.lower()}, would you like to learn some Python today?'
print(message)
message = f'Hello {name.upper()}, would you like to learn some Python today?'
print(message)
message = f'Hello {name.title()}, would you like to learn some Python today?'
print(message)
# Упражнение 5.
message = f'Albert Einstein once said, "A person who never made a mistake never tried anything new."'
print(message)
# Упражнение 6.
famous_person = 'Albert Einstein'
message = f'{famous_person.title()} once said, "A person who never made a mistake never tried anything new."'
print(message)
# Упражнение 7.
famous_person = ' \t\nAlbert Einstein \t\n'
print(f'|{famous_person} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.lstrip()} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.rstrip()} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.strip()} once said, "A person who never made a mistake never tried anything new."|')
```

- avg_line_length: 38.21875 · max_line_length: 109 · alphanum_fraction: 0.732625
- `qsc_*_quality_signal` block (41 values, schema order): 193 · 1,223 · 4.606218 · 0.227979 · 0.094488 · 0.060742 · 0.101237 · 0.755906 · 0.755906 · 0.755906 · 0.755906 · 0.755906 · 0.755906 · 0 · 0.004789 · 0.146361 · 1,223 · 31 · 110 · 39.451613 · 0.846743 · 0.056419 · 0 · 0.315789 · 0 · 0.210526 · 0.75784 · 0.084495 · 0 · 0 · 0 · 0 · 0 · 1 · 0 · false · 0 · 0 · 0 · 0 · 0.526316
- unsuffixed `qsc_*` block (41 values, schema order): 0 · 0 · 0 · null · 0 · 0 · 0 · 0 · 1 · 1 · 1 · 1 · 1 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1 · 1 · 0 · null · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 0 · 1
- effective: `"0"` · hits: 8
**Row 7**

- hexsha: `2b8d46e20478051b98cf25878001dd95e0c89cd8` · size: 47,862 · ext: `py` · lang: Python
- path (all three blocks): `google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py`
- repo (all three blocks): `ryanyuan/python-bigtable` @ head `e55ca07561f9c946276f3bde599e69947769f560`, licenses `["Apache-2.0"]`
- star/issue/fork counts and event datetimes: null for all three blocks
- content (truncated in this extract; indentation lost):

```python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2,
)
from google.cloud.bigtable_admin_v2.proto import (
table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateTable = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.CreateTableFromSnapshot = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListTables = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
)
self.GetTable = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DeleteTable = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ModifyColumnFamilies = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DropRowRange = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GenerateConsistencyToken = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
)
self.CheckConsistency = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
)
self.SnapshotTable = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetSnapshot = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString,
)
self.ListSnapshots = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
)
self.DeleteSnapshot = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CreateBackup = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetBackup = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
)
self.UpdateBackup = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
)
self.DeleteBackup = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListBackups = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString,
)
self.RestoreTable = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
class BigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateTableFromSnapshot(self, request, context):
"""Creates a new table from the specified snapshot. The target table must
not exist. The snapshot and the table must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ModifyColumnFamilies(self, request, context):
"""Performs a series of column family modifications on the specified table.
Either all or none of the modifications will occur before this method
returns, but data requests received prior to that point may see a table
where only some modifications have taken effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GenerateConsistencyToken(self, request, context):
"""Generates a consistency token for a Table, which can be used in
CheckConsistency to check whether mutations to the table that finished
before this call started have been replicated. The tokens will be available
for 90 days.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CheckConsistency(self, request, context):
"""Checks replication consistency based on a consistency token, that is, if
replication has caught up based on the conditions specified in the token
and the check request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SnapshotTable(self, request, context):
"""Creates a new snapshot in the specified cluster from the specified
source table. The cluster and the table must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSnapshot(self, request, context):
"""Gets metadata information about the specified snapshot.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListSnapshots(self, request, context):
"""Lists all snapshots associated with the specified cluster.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteSnapshot(self, request, context):
"""Permanently deletes the specified snapshot.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateBackup(self, request, context):
"""Starts creating a new Cloud Bigtable Backup. The returned backup
[long-running operation][google.longrunning.Operation] can be used to
track creation of the backup. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The
[response][google.longrunning.Operation.response] field type is
[Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the
returned operation will stop the creation and delete the backup.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetBackup(self, request, context):
"""Gets metadata on a pending or completed Cloud Bigtable Backup.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateBackup(self, request, context):
"""Updates a pending or completed Cloud Bigtable Backup.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteBackup(self, request, context):
"""Deletes a pending or completed Cloud Bigtable backup.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListBackups(self, request, context):
"""Lists Cloud Bigtable backups. Returns both completed and pending
backups.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RestoreTable(self, request, context):
"""Create a new table by restoring from a completed backup. The new table
must be in the same instance as the instance containing the backup. The
returned table [long-running operation][google.longrunning.Operation] can
be used to track the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
[RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The
[response][google.longrunning.Operation.response] type is
[Table][google.bigtable.admin.v2.Table], if successful.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIamPolicy(self, request, context):
"""Gets the access control policy for a resource.
Returns an empty policy if the resource exists but does not have a policy
set.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetIamPolicy(self, request, context):
"""Sets the access control policy on a Table or Backup resource.
Replaces any existing policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def TestIamPermissions(self, request, context):
"""Returns permissions that the caller has on the specified table resource.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_BigtableTableAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateTable": grpc.unary_unary_rpc_method_handler(
servicer.CreateTable,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
"CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler(
servicer.CreateTableFromSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ListTables": grpc.unary_unary_rpc_method_handler(
servicer.ListTables,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString,
),
"GetTable": grpc.unary_unary_rpc_method_handler(
servicer.GetTable,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
"DeleteTable": grpc.unary_unary_rpc_method_handler(
servicer.DeleteTable,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler(
servicer.ModifyColumnFamilies,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
"DropRowRange": grpc.unary_unary_rpc_method_handler(
servicer.DropRowRange,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler(
servicer.GenerateConsistencyToken,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString,
),
"CheckConsistency": grpc.unary_unary_rpc_method_handler(
servicer.CheckConsistency,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString,
),
"SnapshotTable": grpc.unary_unary_rpc_method_handler(
servicer.SnapshotTable,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetSnapshot": grpc.unary_unary_rpc_method_handler(
servicer.GetSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString,
),
"ListSnapshots": grpc.unary_unary_rpc_method_handler(
servicer.ListSnapshots,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString,
),
"DeleteSnapshot": grpc.unary_unary_rpc_method_handler(
servicer.DeleteSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"CreateBackup": grpc.unary_unary_rpc_method_handler(
servicer.CreateBackup,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetBackup": grpc.unary_unary_rpc_method_handler(
servicer.GetBackup,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString,
),
"UpdateBackup": grpc.unary_unary_rpc_method_handler(
servicer.UpdateBackup,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString,
),
"DeleteBackup": grpc.unary_unary_rpc_method_handler(
servicer.DeleteBackup,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ListBackups": grpc.unary_unary_rpc_method_handler(
servicer.ListBackups,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString,
),
"RestoreTable": grpc.unary_unary_rpc_method_handler(
servicer.RestoreTable,
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.GetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"SetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"TestIamPermissions": grpc.unary_unary_rpc_method_handler(
servicer.TestIamPermissions,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class BigtableTableAdmin(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
@staticmethod
def CreateTable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def CreateTableFromSnapshot(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ListTables(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetTable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def DeleteTable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ModifyColumnFamilies(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def DropRowRange(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GenerateConsistencyToken(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def CheckConsistency(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SnapshotTable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetSnapshot(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ListSnapshots(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def DeleteSnapshot(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def CreateBackup(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetBackup(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def UpdateBackup(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def DeleteBackup(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ListBackups(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString,
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def RestoreTable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetIamPolicy(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SetIamPolicy(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def TestIamPermissions(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
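The generated module above only binds message types to RPC paths; it contains no networking or auth logic of its own. A minimal usage sketch for driving the stub directly follows — the endpoint is the public Bigtable Admin host, but the project and instance ids are hypothetical, and real code would attach OAuth call credentials (or use the higher-level google-cloud-bigtable client) rather than calling the stub bare.
# Hedged usage sketch for the generated stub above. The ids are placeholders;
# production code would normally go through google-cloud-bigtable instead.
import grpc
from google.cloud.bigtable_admin_v2.proto import (
    bigtable_table_admin_pb2,
    bigtable_table_admin_pb2_grpc,
)

channel = grpc.secure_channel(
    "bigtableadmin.googleapis.com:443", grpc.ssl_channel_credentials()
)
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)

# ListTables is a plain unary-unary call: one ListTablesRequest in,
# one ListTablesResponse out (auth metadata omitted for brevity).
request = bigtable_table_admin_pb2.ListTablesRequest(
    parent="projects/my-project/instances/my-instance"  # hypothetical ids
)
response = stub.ListTables(request)
for table in response.tables:
    print(table.name)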
| 43.869844
| 166
| 0.704672
| 4,888
| 47,862
| 6.410188
| 0.060966
| 0.057926
| 0.067979
| 0.049915
| 0.865126
| 0.858967
| 0.832892
| 0.803147
| 0.760923
| 0.750838
| 0
| 0.008482
| 0.236409
| 47,862
| 1,090
| 167
| 43.910092
| 0.848852
| 0.112657
| 0
| 0.709071
| 1
| 0
| 0.094126
| 0.063871
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050885
| false
| 0
| 0.007743
| 0.024336
| 0.086283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 2be7223c4ab9ef92f6e5ecb73db51d52ed5811e3
| 132
| py
| Python
| quine_examples/quine_list.py
| mbrown1413/Arbitrary-Quine
| 758d55a590074d94f0b0f71dd0312923265a5a36
| ["MIT"]
| 2
| 2016-07-18T14:05:48.000Z
| 2021-12-05T11:35:06.000Z
| quine_examples/quine_list.py
| mbrown1413/Arbitrary-Quine
| 758d55a590074d94f0b0f71dd0312923265a5a36
| ["MIT"]
| null
| null
| null
| quine_examples/quine_list.py
| mbrown1413/Arbitrary-Quine
| 758d55a590074d94f0b0f71dd0312923265a5a36
| ["MIT"]
| null
| null
| null
lines = ['print "lines =", lines', 'for line in lines:', ' print line']
print "lines =", lines
for line in lines:
print line
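The file above is a Python 2 quine: the list literal holds the program's last three lines, the first print statement reproduces the list assignment via the list's repr, and the loop emits the remaining lines. A sketch of the same construction ported to Python 3, where print is a function, is:
# Python 3 port of the list quine above (a sketch, not the repository file).
# The three code lines below print themselves exactly when run; this comment
# block is annotation only and is not part of the quine.
lines = ['print("lines =", lines)', 'for line in lines:', '    print(line)']
print("lines =", lines)
for line in lines:
    print(line)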
| 26.4
| 74
| 0.613636
| 19
| 132
| 4.263158
| 0.263158
| 0.37037
| 0.37037
| 0.444444
| 0.938272
| 0.938272
| 0.938272
| 0.938272
| 0.938272
| 0
| 0
| 0
| 0.227273
| 132
| 4
| 75
| 33
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.462121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.75
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 12
| 9210dc5e1c681b47bc7424501d4cc31b8599ef0b
| 4,763
| py
| Python
| tests/epyccel/test_epyccel_transpose.py
| dina-fouad/pyccel
| f4d919e673b400442b9c7b81212b6fbef749c7b7
| ["MIT"]
| 206
| 2018-06-28T00:28:47.000Z
| 2022-03-29T05:17:03.000Z
| tests/epyccel/test_epyccel_transpose.py
| dina-fouad/pyccel
| f4d919e673b400442b9c7b81212b6fbef749c7b7
| ["MIT"]
| 670
| 2018-07-23T11:02:24.000Z
| 2022-03-30T07:28:05.000Z
| tests/epyccel/test_epyccel_transpose.py
| dina-fouad/pyccel
| f4d919e673b400442b9c7b81212b6fbef749c7b7
| ["MIT"]
| 19
| 2019-09-19T06:01:00.000Z
| 2022-03-29T05:17:06.000Z
# pylint: disable=missing-function-docstring, missing-module-docstring
from numpy.random import randint
from pyccel.epyccel import epyccel
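# Each test defines small pure-Python kernels, compiles them to a native
# extension with epyccel(f, language=language), and checks that the compiled
# function returns exactly the same values as the interpreted original.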
def test_transpose_shape(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
from numpy import transpose
y = transpose(x)
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_transpose_property(language):
def f1(x : 'int[:,:]'):
y = x.T
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_transpose_in_expression(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)+3
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T*3
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_mixed_order(language):
def f1(x : 'int[:,:]'):
from numpy import transpose, ones
n, m = x.shape
y = ones((m,n), order='F')
z = x+transpose(y)
n, m = z.shape
return n, m, z[-1,0], z[0,-1]
def f2(x : 'int[:,:]'):
from numpy import transpose, ones
n, m = x.shape
y = ones((m,n), order='F')
z = x.transpose()+y
n, m = z.shape
return n, m, z[-1,0], z[0,-1]
def f3(x : 'int[:,:,:]'):
from numpy import transpose, ones
n, m, p = x.shape
y = ones((p,m,n))
z = transpose(x)+y
n, m, p = z.shape
return n, m, p, z[0,-1,0], z[0,0,-1], z[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x1 ) == f2_epyc( x1 )
f3_epyc = epyccel(f3, language=language)
assert f3( x2 ) == f3_epyc( x2 )
def test_transpose_pointer(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)
x[0,-1] += 22
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T
x[0,-1,0] += 11
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x1_copy = x1.copy()
x2 = randint(50, size=(2,3,7))
x2_copy = x2.copy()
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1_copy )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2_copy )
def test_transpose_of_expression(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x*2)+3
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = (x*2).T*3
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_force_transpose(language):
def f1(x : 'int[:,:]'):
from numpy import transpose, empty
n,m = x.shape
y = empty((m,n))
y[:,:] = transpose(x)
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
from numpy import empty
n,m,p = x.shape
y = empty((p,m,n))
y[:,:,:] = x.transpose()
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
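test_transpose_pointer above hinges on numpy.transpose returning a view rather than a copy, so a write through the original array is visible through the transposed one. A standalone NumPy check of that aliasing (no pyccel involved) is:
# Minimal sketch of the aliasing behaviour test_transpose_pointer relies on:
# numpy.transpose returns a view onto the same buffer, not a copy.
import numpy as np

x = np.arange(10).reshape(2, 5)
y = np.transpose(x)           # view of x with shape (5, 2)
x[0, -1] += 22                # mutate through the original array
assert y[-1, 0] == x[0, -1]   # the transposed view sees the update
print(y.shape, y[-1, 0])      # (5, 2) 26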
| 28.183432
| 71
| 0.505774
| 802
| 4,763
| 2.941397
| 0.069825
| 0.029674
| 0.020348
| 0.082662
| 0.845697
| 0.820687
| 0.813056
| 0.802459
| 0.802459
| 0.765155
| 0
| 0.088676
| 0.299181
| 4,763
| 168
| 72
| 28.35119
| 0.618035
| 0.014487
| 0
| 0.737226
| 0
| 0
| 0.028986
| 0
| 0
| 0
| 0
| 0
| 0.109489
| 1
| 0.160584
| false
| 0
| 0.087591
| 0
| 0.357664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| a62e40b59d5193d55d8c169993defb3ea0af6a2d
| 161,186
| py
| Python
| tests/test_drive_sample.py
| chyroc/pylark
| a54cce6b814935fd3c72668b262b54c8ee461484
| ["Apache-2.0"]
| 7
| 2021-08-18T00:42:05.000Z
| 2022-03-14T09:49:15.000Z
| tests/test_drive_sample.py
| chyroc/pylark
| a54cce6b814935fd3c72668b262b54c8ee461484
| ["Apache-2.0"]
| null
| null
| null
| tests/test_drive_sample.py
| chyroc/pylark
| a54cce6b814935fd3c72668b262b54c8ee461484
| ["Apache-2.0"]
| 1
| 2022-03-14T09:49:20.000Z
| 2022-03-14T09:49:20.000Z
# Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
raise pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")
def mock_raw_request(*args, **kwargs):
raise pylark.PyLarkError(
scope="scope", func="func", code=1, msg="mock-raw-request-failed"
)
# mock get token
class TestDriveSampleMockGetTokenFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDriveSampleMockGetTokenFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
self.module_cli = self.cli.drive
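    # Every test below calls one Drive wrapper with an empty request object;
    # because both token getters were replaced with a failing stub above,
    # each call raises pylark.PyLarkError before any HTTP request is made,
    # and pytest.raises captures it for the "msg=failed" assertion.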
def test_mock_get_token_get_drive_file_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_meta(pylark.GetDriveFileMetaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_file(pylark.CreateDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_copy_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.copy_drive_file(pylark.CopyDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_file(pylark.DeleteDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_drive_sheet_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_sheet_file(pylark.DeleteDriveSheetFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_folder(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_folder(pylark.CreateDriveFolderReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_meta(pylark.GetDriveFolderMetaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_root_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_root_folder_meta(
pylark.GetDriveRootFolderMetaReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_folder_children(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_children(
pylark.GetDriveFolderChildrenReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_file_statistics(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_statistics(
pylark.GetDriveFileStatisticsReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_download_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_file(pylark.DownloadDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_file(pylark.UploadDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_prepare_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_file(
pylark.PrepareUploadDriveFileReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_part_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_file(pylark.PartUploadDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_finish_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_file(pylark.FinishUploadDriveFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_download_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_media(pylark.DownloadDriveMediaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_media(pylark.UploadDriveMediaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_prepare_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_media(
pylark.PrepareUploadDriveMediaReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_part_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_media(pylark.PartUploadDriveMediaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_finish_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_media(
pylark.FinishUploadDriveMediaReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission_old(
pylark.CreateDriveMemberPermissionOldReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_transfer_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.transfer_drive_member_permission(
pylark.TransferDriveMemberPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_member_permission_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_member_permission_list(
pylark.GetDriveMemberPermissionListReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission(
pylark.CreateDriveMemberPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission_old(
pylark.DeleteDriveMemberPermissionOldReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission(
pylark.DeleteDriveMemberPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission_old(
pylark.UpdateDriveMemberPermissionOldReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission(
pylark.UpdateDriveMemberPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_check_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.check_drive_member_permission(
pylark.CheckDriveMemberPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_public_permission_v1_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v1_old(
pylark.UpdateDrivePublicPermissionV1OldReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_public_permission_v2_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v2_old(
pylark.UpdateDrivePublicPermissionV2OldReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_public_permission_v2(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_public_permission_v2(
pylark.GetDrivePublicPermissionV2Req()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_public_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission(
pylark.UpdateDrivePublicPermissionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_get_drive_media_tmp_download_url(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_drive_media_tmp_download_url(
pylark.BatchGetDriveMediaTmpDownloadURLReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_comment_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment_list(pylark.GetDriveCommentListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment(pylark.GetDriveCommentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_comment(pylark.CreateDriveCommentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment(pylark.UpdateDriveCommentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_comment(pylark.DeleteDriveCommentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_drive_comment_patch(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment_patch(
pylark.UpdateDriveCommentPatchReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_doc(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_doc(pylark.CreateDriveDocReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_doc_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_content(pylark.GetDriveDocContentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_doc_raw_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_raw_content(pylark.GetDriveDocRawContentReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_doc_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_meta(pylark.GetDriveDocMetaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet(pylark.CreateSheetReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_meta(pylark.GetSheetMetaReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_property(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_property(pylark.UpdateSheetPropertyReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_update_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_sheet(pylark.BatchUpdateSheetReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_import_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.import_sheet(pylark.ImportSheetReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_import_task(pylark.CreateDriveImportTaskReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_import_task(pylark.GetDriveImportTaskReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_move_sheet_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_sheet_dimension(pylark.MoveSheetDimensionReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_prepend_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepend_sheet_value(pylark.PrependSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_append_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.append_sheet_value(pylark.AppendSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_insert_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.insert_sheet_dimension_range(
pylark.InsertSheetDimensionRangeReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_add_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_sheet_dimension_range(
pylark.AddSheetDimensionRangeReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_dimension_range(
pylark.UpdateSheetDimensionRangeReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_dimension_range(
pylark.DeleteSheetDimensionRangeReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_value(pylark.GetSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_sheet_value(pylark.BatchGetSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value(pylark.SetSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_value(pylark.BatchSetSheetValueReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_style(pylark.SetSheetStyleReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_style(pylark.BatchSetSheetStyleReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_merge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.merge_sheet_cell(pylark.MergeSheetCellReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_unmerge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.unmerge_sheet_cell(pylark.UnmergeSheetCellReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_set_sheet_value_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value_image(pylark.SetSheetValueImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_find_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.find_sheet(pylark.FindSheetReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_replace_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.replace_sheet(pylark.ReplaceSheetReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_condition_format(
pylark.CreateSheetConditionFormatReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_condition_format(
pylark.GetSheetConditionFormatReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_condition_format(
pylark.UpdateSheetConditionFormatReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_condition_format(
pylark.DeleteSheetConditionFormatReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_protected_dimension(
pylark.CreateSheetProtectedDimensionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_protected_dimension(
pylark.GetSheetProtectedDimensionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_protected_dimension(
pylark.UpdateSheetProtectedDimensionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_protected_dimension(
pylark.DeleteSheetProtectedDimensionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_data_validation_dropdown(
pylark.CreateSheetDataValidationDropdownReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_data_validation_dropdown(
pylark.DeleteSheetDataValidationDropdownReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_data_validation_dropdown(
pylark.UpdateSheetDataValidationDropdownReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_data_validation_dropdown(
pylark.GetSheetDataValidationDropdownReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter(pylark.CreateSheetFilterReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter(pylark.DeleteSheetFilterReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter(pylark.UpdateSheetFilterReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter(pylark.GetSheetFilterReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view(pylark.CreateSheetFilterViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view(pylark.DeleteSheetFilterViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view(pylark.UpdateSheetFilterViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view(pylark.GetSheetFilterViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_query_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view(pylark.QuerySheetFilterViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view_condition(
pylark.CreateSheetFilterViewConditionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view_condition(
pylark.DeleteSheetFilterViewConditionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view_condition(
pylark.UpdateSheetFilterViewConditionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view_condition(
pylark.GetSheetFilterViewConditionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_query_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view_condition(
pylark.QuerySheetFilterViewConditionReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_float_image(pylark.CreateSheetFloatImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_float_image(pylark.DeleteSheetFloatImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_float_image(pylark.UpdateSheetFloatImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_float_image(pylark.GetSheetFloatImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_query_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_float_image(pylark.QuerySheetFloatImageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_wiki_space_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space_list(pylark.GetWikiSpaceListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_wiki_space(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space(pylark.GetWikiSpaceReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_wiki_space_setting(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_wiki_space_setting(
pylark.UpdateWikiSpaceSettingReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_add_wiki_space_member(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_wiki_space_member(pylark.AddWikiSpaceMemberReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_wiki_node(pylark.CreateWikiNodeReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_wiki_node_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node_list(pylark.GetWikiNodeListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node(pylark.GetWikiNodeReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_move_docs_to_wiki(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_docs_to_wiki(pylark.MoveDocsToWikiReq())
assert "msg=failed" in f"{e}"
# mock self func
class TestDriveSampleMockSelfFuncFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDriveSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.drive
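    # Pattern shared by every test below: save the real method, swap in the
    # ``mock`` stub (presumably defined earlier in this file, raising
    # PyLarkError with "msg=mock-failed"), assert that calling through the
    # patched attribute raises, then restore the original so later tests see
    # the real method again. Note the restore is skipped if the assertion
    # itself fails, since there is no try/finally around it.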
def test_mock_self_func_get_drive_file_meta(self):
origin_func = self.module_cli.get_drive_file_meta
self.module_cli.get_drive_file_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_meta(pylark.GetDriveFileMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_file_meta = origin_func
def test_mock_self_func_create_drive_file(self):
origin_func = self.module_cli.create_drive_file
self.module_cli.create_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_file(pylark.CreateDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_file = origin_func
def test_mock_self_func_copy_drive_file(self):
origin_func = self.module_cli.copy_drive_file
self.module_cli.copy_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.copy_drive_file(pylark.CopyDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.copy_drive_file = origin_func
def test_mock_self_func_delete_drive_file(self):
origin_func = self.module_cli.delete_drive_file
self.module_cli.delete_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_file(pylark.DeleteDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_drive_file = origin_func
def test_mock_self_func_delete_drive_sheet_file(self):
origin_func = self.module_cli.delete_drive_sheet_file
self.module_cli.delete_drive_sheet_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_sheet_file(pylark.DeleteDriveSheetFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_drive_sheet_file = origin_func
def test_mock_self_func_create_drive_folder(self):
origin_func = self.module_cli.create_drive_folder
self.module_cli.create_drive_folder = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_folder(pylark.CreateDriveFolderReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_folder = origin_func
def test_mock_self_func_get_drive_folder_meta(self):
origin_func = self.module_cli.get_drive_folder_meta
self.module_cli.get_drive_folder_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_meta(pylark.GetDriveFolderMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_folder_meta = origin_func
def test_mock_self_func_get_drive_root_folder_meta(self):
origin_func = self.module_cli.get_drive_root_folder_meta
self.module_cli.get_drive_root_folder_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_root_folder_meta(
pylark.GetDriveRootFolderMetaReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_root_folder_meta = origin_func
def test_mock_self_func_get_drive_folder_children(self):
origin_func = self.module_cli.get_drive_folder_children
self.module_cli.get_drive_folder_children = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_children(
pylark.GetDriveFolderChildrenReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_folder_children = origin_func
def test_mock_self_func_get_drive_file_statistics(self):
origin_func = self.module_cli.get_drive_file_statistics
self.module_cli.get_drive_file_statistics = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_statistics(
pylark.GetDriveFileStatisticsReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_file_statistics = origin_func
def test_mock_self_func_download_drive_file(self):
origin_func = self.module_cli.download_drive_file
self.module_cli.download_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_file(pylark.DownloadDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.download_drive_file = origin_func
def test_mock_self_func_upload_drive_file(self):
origin_func = self.module_cli.upload_drive_file
self.module_cli.upload_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_file(pylark.UploadDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.upload_drive_file = origin_func
def test_mock_self_func_prepare_upload_drive_file(self):
origin_func = self.module_cli.prepare_upload_drive_file
self.module_cli.prepare_upload_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_file(
pylark.PrepareUploadDriveFileReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.prepare_upload_drive_file = origin_func
def test_mock_self_func_part_upload_drive_file(self):
origin_func = self.module_cli.part_upload_drive_file
self.module_cli.part_upload_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_file(pylark.PartUploadDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.part_upload_drive_file = origin_func
def test_mock_self_func_finish_upload_drive_file(self):
origin_func = self.module_cli.finish_upload_drive_file
self.module_cli.finish_upload_drive_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_file(pylark.FinishUploadDriveFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.finish_upload_drive_file = origin_func
def test_mock_self_func_download_drive_media(self):
origin_func = self.module_cli.download_drive_media
self.module_cli.download_drive_media = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_media(pylark.DownloadDriveMediaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.download_drive_media = origin_func
def test_mock_self_func_upload_drive_media(self):
origin_func = self.module_cli.upload_drive_media
self.module_cli.upload_drive_media = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_media(pylark.UploadDriveMediaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.upload_drive_media = origin_func
def test_mock_self_func_prepare_upload_drive_media(self):
origin_func = self.module_cli.prepare_upload_drive_media
self.module_cli.prepare_upload_drive_media = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_media(
pylark.PrepareUploadDriveMediaReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.prepare_upload_drive_media = origin_func
def test_mock_self_func_part_upload_drive_media(self):
origin_func = self.module_cli.part_upload_drive_media
self.module_cli.part_upload_drive_media = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_media(pylark.PartUploadDriveMediaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.part_upload_drive_media = origin_func
def test_mock_self_func_finish_upload_drive_media(self):
origin_func = self.module_cli.finish_upload_drive_media
self.module_cli.finish_upload_drive_media = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_media(
pylark.FinishUploadDriveMediaReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.finish_upload_drive_media = origin_func
def test_mock_self_func_create_drive_member_permission_old(self):
origin_func = self.module_cli.create_drive_member_permission_old
self.module_cli.create_drive_member_permission_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission_old(
pylark.CreateDriveMemberPermissionOldReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_member_permission_old = origin_func
def test_mock_self_func_transfer_drive_member_permission(self):
origin_func = self.module_cli.transfer_drive_member_permission
self.module_cli.transfer_drive_member_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.transfer_drive_member_permission(
pylark.TransferDriveMemberPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.transfer_drive_member_permission = origin_func
def test_mock_self_func_get_drive_member_permission_list(self):
origin_func = self.module_cli.get_drive_member_permission_list
self.module_cli.get_drive_member_permission_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_member_permission_list(
pylark.GetDriveMemberPermissionListReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_member_permission_list = origin_func
def test_mock_self_func_create_drive_member_permission(self):
origin_func = self.module_cli.create_drive_member_permission
self.module_cli.create_drive_member_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission(
pylark.CreateDriveMemberPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_member_permission = origin_func
def test_mock_self_func_delete_drive_member_permission_old(self):
origin_func = self.module_cli.delete_drive_member_permission_old
self.module_cli.delete_drive_member_permission_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission_old(
pylark.DeleteDriveMemberPermissionOldReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_drive_member_permission_old = origin_func
def test_mock_self_func_delete_drive_member_permission(self):
origin_func = self.module_cli.delete_drive_member_permission
self.module_cli.delete_drive_member_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission(
pylark.DeleteDriveMemberPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_drive_member_permission = origin_func
def test_mock_self_func_update_drive_member_permission_old(self):
origin_func = self.module_cli.update_drive_member_permission_old
self.module_cli.update_drive_member_permission_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission_old(
pylark.UpdateDriveMemberPermissionOldReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_member_permission_old = origin_func
def test_mock_self_func_update_drive_member_permission(self):
origin_func = self.module_cli.update_drive_member_permission
self.module_cli.update_drive_member_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission(
pylark.UpdateDriveMemberPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_member_permission = origin_func
def test_mock_self_func_check_drive_member_permission(self):
origin_func = self.module_cli.check_drive_member_permission
self.module_cli.check_drive_member_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.check_drive_member_permission(
pylark.CheckDriveMemberPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.check_drive_member_permission = origin_func
def test_mock_self_func_update_drive_public_permission_v1_old(self):
origin_func = self.module_cli.update_drive_public_permission_v1_old
self.module_cli.update_drive_public_permission_v1_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v1_old(
pylark.UpdateDrivePublicPermissionV1OldReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_public_permission_v1_old = origin_func
def test_mock_self_func_update_drive_public_permission_v2_old(self):
origin_func = self.module_cli.update_drive_public_permission_v2_old
self.module_cli.update_drive_public_permission_v2_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v2_old(
pylark.UpdateDrivePublicPermissionV2OldReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_public_permission_v2_old = origin_func
def test_mock_self_func_get_drive_public_permission_v2(self):
origin_func = self.module_cli.get_drive_public_permission_v2
self.module_cli.get_drive_public_permission_v2 = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_public_permission_v2(
pylark.GetDrivePublicPermissionV2Req()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_public_permission_v2 = origin_func
def test_mock_self_func_update_drive_public_permission(self):
origin_func = self.module_cli.update_drive_public_permission
self.module_cli.update_drive_public_permission = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission(
pylark.UpdateDrivePublicPermissionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_public_permission = origin_func
def test_mock_self_func_batch_get_drive_media_tmp_download_url(self):
origin_func = self.module_cli.batch_get_drive_media_tmp_download_url
self.module_cli.batch_get_drive_media_tmp_download_url = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_drive_media_tmp_download_url(
pylark.BatchGetDriveMediaTmpDownloadURLReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_get_drive_media_tmp_download_url = origin_func
def test_mock_self_func_get_drive_comment_list(self):
origin_func = self.module_cli.get_drive_comment_list
self.module_cli.get_drive_comment_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment_list(pylark.GetDriveCommentListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_comment_list = origin_func
def test_mock_self_func_get_drive_comment(self):
origin_func = self.module_cli.get_drive_comment
self.module_cli.get_drive_comment = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment(pylark.GetDriveCommentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_comment = origin_func
def test_mock_self_func_create_drive_comment(self):
origin_func = self.module_cli.create_drive_comment
self.module_cli.create_drive_comment = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_comment(pylark.CreateDriveCommentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_comment = origin_func
def test_mock_self_func_update_drive_comment(self):
origin_func = self.module_cli.update_drive_comment
self.module_cli.update_drive_comment = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment(pylark.UpdateDriveCommentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_comment = origin_func
def test_mock_self_func_delete_drive_comment(self):
origin_func = self.module_cli.delete_drive_comment
self.module_cli.delete_drive_comment = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_comment(pylark.DeleteDriveCommentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_drive_comment = origin_func
def test_mock_self_func_update_drive_comment_patch(self):
origin_func = self.module_cli.update_drive_comment_patch
self.module_cli.update_drive_comment_patch = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment_patch(
pylark.UpdateDriveCommentPatchReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_drive_comment_patch = origin_func
def test_mock_self_func_create_drive_doc(self):
origin_func = self.module_cli.create_drive_doc
self.module_cli.create_drive_doc = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_doc(pylark.CreateDriveDocReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_doc = origin_func
def test_mock_self_func_get_drive_doc_content(self):
origin_func = self.module_cli.get_drive_doc_content
self.module_cli.get_drive_doc_content = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_content(pylark.GetDriveDocContentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_doc_content = origin_func
def test_mock_self_func_get_drive_doc_raw_content(self):
origin_func = self.module_cli.get_drive_doc_raw_content
self.module_cli.get_drive_doc_raw_content = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_raw_content(pylark.GetDriveDocRawContentReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_doc_raw_content = origin_func
def test_mock_self_func_get_drive_doc_meta(self):
origin_func = self.module_cli.get_drive_doc_meta
self.module_cli.get_drive_doc_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_meta(pylark.GetDriveDocMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_doc_meta = origin_func
def test_mock_self_func_create_sheet(self):
origin_func = self.module_cli.create_sheet
self.module_cli.create_sheet = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet(pylark.CreateSheetReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet = origin_func
def test_mock_self_func_get_sheet_meta(self):
origin_func = self.module_cli.get_sheet_meta
self.module_cli.get_sheet_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_meta(pylark.GetSheetMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_meta = origin_func
def test_mock_self_func_update_sheet_property(self):
origin_func = self.module_cli.update_sheet_property
self.module_cli.update_sheet_property = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_property(pylark.UpdateSheetPropertyReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_property = origin_func
def test_mock_self_func_batch_update_sheet(self):
origin_func = self.module_cli.batch_update_sheet
self.module_cli.batch_update_sheet = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_sheet(pylark.BatchUpdateSheetReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_update_sheet = origin_func
def test_mock_self_func_import_sheet(self):
origin_func = self.module_cli.import_sheet
self.module_cli.import_sheet = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.import_sheet(pylark.ImportSheetReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.import_sheet = origin_func
def test_mock_self_func_create_drive_import_task(self):
origin_func = self.module_cli.create_drive_import_task
self.module_cli.create_drive_import_task = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_import_task(pylark.CreateDriveImportTaskReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_drive_import_task = origin_func
def test_mock_self_func_get_drive_import_task(self):
origin_func = self.module_cli.get_drive_import_task
self.module_cli.get_drive_import_task = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_import_task(pylark.GetDriveImportTaskReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_drive_import_task = origin_func
def test_mock_self_func_move_sheet_dimension(self):
origin_func = self.module_cli.move_sheet_dimension
self.module_cli.move_sheet_dimension = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_sheet_dimension(pylark.MoveSheetDimensionReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.move_sheet_dimension = origin_func
def test_mock_self_func_prepend_sheet_value(self):
origin_func = self.module_cli.prepend_sheet_value
self.module_cli.prepend_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepend_sheet_value(pylark.PrependSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.prepend_sheet_value = origin_func
def test_mock_self_func_append_sheet_value(self):
origin_func = self.module_cli.append_sheet_value
self.module_cli.append_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.append_sheet_value(pylark.AppendSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.append_sheet_value = origin_func
def test_mock_self_func_insert_sheet_dimension_range(self):
origin_func = self.module_cli.insert_sheet_dimension_range
self.module_cli.insert_sheet_dimension_range = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.insert_sheet_dimension_range(
pylark.InsertSheetDimensionRangeReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.insert_sheet_dimension_range = origin_func
def test_mock_self_func_add_sheet_dimension_range(self):
origin_func = self.module_cli.add_sheet_dimension_range
self.module_cli.add_sheet_dimension_range = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_sheet_dimension_range(
pylark.AddSheetDimensionRangeReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.add_sheet_dimension_range = origin_func
def test_mock_self_func_update_sheet_dimension_range(self):
origin_func = self.module_cli.update_sheet_dimension_range
self.module_cli.update_sheet_dimension_range = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_dimension_range(
pylark.UpdateSheetDimensionRangeReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_dimension_range = origin_func
def test_mock_self_func_delete_sheet_dimension_range(self):
origin_func = self.module_cli.delete_sheet_dimension_range
self.module_cli.delete_sheet_dimension_range = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_dimension_range(
pylark.DeleteSheetDimensionRangeReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_dimension_range = origin_func
def test_mock_self_func_get_sheet_value(self):
origin_func = self.module_cli.get_sheet_value
self.module_cli.get_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_value(pylark.GetSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_value = origin_func
def test_mock_self_func_batch_get_sheet_value(self):
origin_func = self.module_cli.batch_get_sheet_value
self.module_cli.batch_get_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_sheet_value(pylark.BatchGetSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_get_sheet_value = origin_func
def test_mock_self_func_set_sheet_value(self):
origin_func = self.module_cli.set_sheet_value
self.module_cli.set_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value(pylark.SetSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.set_sheet_value = origin_func
def test_mock_self_func_batch_set_sheet_value(self):
origin_func = self.module_cli.batch_set_sheet_value
self.module_cli.batch_set_sheet_value = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_value(pylark.BatchSetSheetValueReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_set_sheet_value = origin_func
def test_mock_self_func_set_sheet_style(self):
origin_func = self.module_cli.set_sheet_style
self.module_cli.set_sheet_style = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_style(pylark.SetSheetStyleReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.set_sheet_style = origin_func
def test_mock_self_func_batch_set_sheet_style(self):
origin_func = self.module_cli.batch_set_sheet_style
self.module_cli.batch_set_sheet_style = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_style(pylark.BatchSetSheetStyleReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_set_sheet_style = origin_func
def test_mock_self_func_merge_sheet_cell(self):
origin_func = self.module_cli.merge_sheet_cell
self.module_cli.merge_sheet_cell = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.merge_sheet_cell(pylark.MergeSheetCellReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.merge_sheet_cell = origin_func
def test_mock_self_func_unmerge_sheet_cell(self):
origin_func = self.module_cli.unmerge_sheet_cell
self.module_cli.unmerge_sheet_cell = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.unmerge_sheet_cell(pylark.UnmergeSheetCellReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.unmerge_sheet_cell = origin_func
def test_mock_self_func_set_sheet_value_image(self):
origin_func = self.module_cli.set_sheet_value_image
self.module_cli.set_sheet_value_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value_image(pylark.SetSheetValueImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.set_sheet_value_image = origin_func
def test_mock_self_func_find_sheet(self):
origin_func = self.module_cli.find_sheet
self.module_cli.find_sheet = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.find_sheet(pylark.FindSheetReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.find_sheet = origin_func
def test_mock_self_func_replace_sheet(self):
origin_func = self.module_cli.replace_sheet
self.module_cli.replace_sheet = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.replace_sheet(pylark.ReplaceSheetReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.replace_sheet = origin_func
def test_mock_self_func_create_sheet_condition_format(self):
origin_func = self.module_cli.create_sheet_condition_format
self.module_cli.create_sheet_condition_format = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_condition_format(
pylark.CreateSheetConditionFormatReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_condition_format = origin_func
def test_mock_self_func_get_sheet_condition_format(self):
origin_func = self.module_cli.get_sheet_condition_format
self.module_cli.get_sheet_condition_format = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_condition_format(
pylark.GetSheetConditionFormatReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_condition_format = origin_func
def test_mock_self_func_update_sheet_condition_format(self):
origin_func = self.module_cli.update_sheet_condition_format
self.module_cli.update_sheet_condition_format = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_condition_format(
pylark.UpdateSheetConditionFormatReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_condition_format = origin_func
def test_mock_self_func_delete_sheet_condition_format(self):
origin_func = self.module_cli.delete_sheet_condition_format
self.module_cli.delete_sheet_condition_format = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_condition_format(
pylark.DeleteSheetConditionFormatReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_condition_format = origin_func
def test_mock_self_func_create_sheet_protected_dimension(self):
origin_func = self.module_cli.create_sheet_protected_dimension
self.module_cli.create_sheet_protected_dimension = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_protected_dimension(
pylark.CreateSheetProtectedDimensionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_protected_dimension = origin_func
def test_mock_self_func_get_sheet_protected_dimension(self):
origin_func = self.module_cli.get_sheet_protected_dimension
self.module_cli.get_sheet_protected_dimension = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_protected_dimension(
pylark.GetSheetProtectedDimensionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_protected_dimension = origin_func
def test_mock_self_func_update_sheet_protected_dimension(self):
origin_func = self.module_cli.update_sheet_protected_dimension
self.module_cli.update_sheet_protected_dimension = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_protected_dimension(
pylark.UpdateSheetProtectedDimensionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_protected_dimension = origin_func
def test_mock_self_func_delete_sheet_protected_dimension(self):
origin_func = self.module_cli.delete_sheet_protected_dimension
self.module_cli.delete_sheet_protected_dimension = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_protected_dimension(
pylark.DeleteSheetProtectedDimensionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_protected_dimension = origin_func
def test_mock_self_func_create_sheet_data_validation_dropdown(self):
origin_func = self.module_cli.create_sheet_data_validation_dropdown
self.module_cli.create_sheet_data_validation_dropdown = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_data_validation_dropdown(
pylark.CreateSheetDataValidationDropdownReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_data_validation_dropdown = origin_func
def test_mock_self_func_delete_sheet_data_validation_dropdown(self):
origin_func = self.module_cli.delete_sheet_data_validation_dropdown
self.module_cli.delete_sheet_data_validation_dropdown = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_data_validation_dropdown(
pylark.DeleteSheetDataValidationDropdownReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_data_validation_dropdown = origin_func
def test_mock_self_func_update_sheet_data_validation_dropdown(self):
origin_func = self.module_cli.update_sheet_data_validation_dropdown
self.module_cli.update_sheet_data_validation_dropdown = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_data_validation_dropdown(
pylark.UpdateSheetDataValidationDropdownReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_data_validation_dropdown = origin_func
def test_mock_self_func_get_sheet_data_validation_dropdown(self):
origin_func = self.module_cli.get_sheet_data_validation_dropdown
self.module_cli.get_sheet_data_validation_dropdown = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_data_validation_dropdown(
pylark.GetSheetDataValidationDropdownReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_data_validation_dropdown = origin_func
def test_mock_self_func_create_sheet_filter(self):
origin_func = self.module_cli.create_sheet_filter
self.module_cli.create_sheet_filter = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter(pylark.CreateSheetFilterReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_filter = origin_func
def test_mock_self_func_delete_sheet_filter(self):
origin_func = self.module_cli.delete_sheet_filter
self.module_cli.delete_sheet_filter = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter(pylark.DeleteSheetFilterReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_filter = origin_func
def test_mock_self_func_update_sheet_filter(self):
origin_func = self.module_cli.update_sheet_filter
self.module_cli.update_sheet_filter = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter(pylark.UpdateSheetFilterReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_filter = origin_func
def test_mock_self_func_get_sheet_filter(self):
origin_func = self.module_cli.get_sheet_filter
self.module_cli.get_sheet_filter = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter(pylark.GetSheetFilterReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_filter = origin_func
def test_mock_self_func_create_sheet_filter_view(self):
origin_func = self.module_cli.create_sheet_filter_view
self.module_cli.create_sheet_filter_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view(pylark.CreateSheetFilterViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_filter_view = origin_func
def test_mock_self_func_delete_sheet_filter_view(self):
origin_func = self.module_cli.delete_sheet_filter_view
self.module_cli.delete_sheet_filter_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view(pylark.DeleteSheetFilterViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_filter_view = origin_func
def test_mock_self_func_update_sheet_filter_view(self):
origin_func = self.module_cli.update_sheet_filter_view
self.module_cli.update_sheet_filter_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view(pylark.UpdateSheetFilterViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_filter_view = origin_func
def test_mock_self_func_get_sheet_filter_view(self):
origin_func = self.module_cli.get_sheet_filter_view
self.module_cli.get_sheet_filter_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view(pylark.GetSheetFilterViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_filter_view = origin_func
def test_mock_self_func_query_sheet_filter_view(self):
origin_func = self.module_cli.query_sheet_filter_view
self.module_cli.query_sheet_filter_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view(pylark.QuerySheetFilterViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.query_sheet_filter_view = origin_func
def test_mock_self_func_create_sheet_filter_view_condition(self):
origin_func = self.module_cli.create_sheet_filter_view_condition
self.module_cli.create_sheet_filter_view_condition = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view_condition(
pylark.CreateSheetFilterViewConditionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_filter_view_condition = origin_func
def test_mock_self_func_delete_sheet_filter_view_condition(self):
origin_func = self.module_cli.delete_sheet_filter_view_condition
self.module_cli.delete_sheet_filter_view_condition = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view_condition(
pylark.DeleteSheetFilterViewConditionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_filter_view_condition = origin_func
def test_mock_self_func_update_sheet_filter_view_condition(self):
origin_func = self.module_cli.update_sheet_filter_view_condition
self.module_cli.update_sheet_filter_view_condition = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view_condition(
pylark.UpdateSheetFilterViewConditionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_filter_view_condition = origin_func
def test_mock_self_func_get_sheet_filter_view_condition(self):
origin_func = self.module_cli.get_sheet_filter_view_condition
self.module_cli.get_sheet_filter_view_condition = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view_condition(
pylark.GetSheetFilterViewConditionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_filter_view_condition = origin_func
def test_mock_self_func_query_sheet_filter_view_condition(self):
origin_func = self.module_cli.query_sheet_filter_view_condition
self.module_cli.query_sheet_filter_view_condition = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view_condition(
pylark.QuerySheetFilterViewConditionReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.query_sheet_filter_view_condition = origin_func
def test_mock_self_func_create_sheet_float_image(self):
origin_func = self.module_cli.create_sheet_float_image
self.module_cli.create_sheet_float_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_float_image(pylark.CreateSheetFloatImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_sheet_float_image = origin_func
def test_mock_self_func_delete_sheet_float_image(self):
origin_func = self.module_cli.delete_sheet_float_image
self.module_cli.delete_sheet_float_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_float_image(pylark.DeleteSheetFloatImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_sheet_float_image = origin_func
def test_mock_self_func_update_sheet_float_image(self):
origin_func = self.module_cli.update_sheet_float_image
self.module_cli.update_sheet_float_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_float_image(pylark.UpdateSheetFloatImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_sheet_float_image = origin_func
def test_mock_self_func_get_sheet_float_image(self):
origin_func = self.module_cli.get_sheet_float_image
self.module_cli.get_sheet_float_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_float_image(pylark.GetSheetFloatImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_sheet_float_image = origin_func
def test_mock_self_func_query_sheet_float_image(self):
origin_func = self.module_cli.query_sheet_float_image
self.module_cli.query_sheet_float_image = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_float_image(pylark.QuerySheetFloatImageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.query_sheet_float_image = origin_func
def test_mock_self_func_get_wiki_space_list(self):
origin_func = self.module_cli.get_wiki_space_list
self.module_cli.get_wiki_space_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space_list(pylark.GetWikiSpaceListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_wiki_space_list = origin_func
def test_mock_self_func_get_wiki_space(self):
origin_func = self.module_cli.get_wiki_space
self.module_cli.get_wiki_space = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space(pylark.GetWikiSpaceReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_wiki_space = origin_func
def test_mock_self_func_update_wiki_space_setting(self):
origin_func = self.module_cli.update_wiki_space_setting
self.module_cli.update_wiki_space_setting = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_wiki_space_setting(
pylark.UpdateWikiSpaceSettingReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_wiki_space_setting = origin_func
def test_mock_self_func_add_wiki_space_member(self):
origin_func = self.module_cli.add_wiki_space_member
self.module_cli.add_wiki_space_member = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_wiki_space_member(pylark.AddWikiSpaceMemberReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.add_wiki_space_member = origin_func
def test_mock_self_func_create_wiki_node(self):
origin_func = self.module_cli.create_wiki_node
self.module_cli.create_wiki_node = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_wiki_node(pylark.CreateWikiNodeReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_wiki_node = origin_func
def test_mock_self_func_get_wiki_node_list(self):
origin_func = self.module_cli.get_wiki_node_list
self.module_cli.get_wiki_node_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node_list(pylark.GetWikiNodeListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_wiki_node_list = origin_func
def test_mock_self_func_get_wiki_node(self):
origin_func = self.module_cli.get_wiki_node
self.module_cli.get_wiki_node = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node(pylark.GetWikiNodeReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_wiki_node = origin_func
def test_mock_self_func_move_docs_to_wiki(self):
origin_func = self.module_cli.move_docs_to_wiki
self.module_cli.move_docs_to_wiki = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_docs_to_wiki(pylark.MoveDocsToWikiReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.move_docs_to_wiki = origin_func
# mock raw request
class TestDriveSampleMockRawRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDriveSampleMockRawRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.drive
self.cli.raw_request = mock_raw_request
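    # ``raw_request`` is replaced on the client itself, so every call in this
    # class fails at the transport layer regardless of which API method is
    # used; each test then checks the exception type, a positive error code,
    # and the "mock-raw-request-failed" message attached by the stub.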
def test_mock_raw_request_get_drive_file_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_meta(pylark.GetDriveFileMetaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_file(
pylark.CreateDriveFileReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_copy_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.copy_drive_file(
pylark.CopyDriveFileReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_file(
pylark.DeleteDriveFileReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_drive_sheet_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_sheet_file(
pylark.DeleteDriveSheetFileReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_folder(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_folder(
pylark.CreateDriveFolderReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_meta(
pylark.GetDriveFolderMetaReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_root_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_root_folder_meta(
pylark.GetDriveRootFolderMetaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_folder_children(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_children(
pylark.GetDriveFolderChildrenReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_file_statistics(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_statistics(
pylark.GetDriveFileStatisticsReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_download_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_file(
pylark.DownloadDriveFileReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_file(pylark.UploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_prepare_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_file(
pylark.PrepareUploadDriveFileReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_part_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_file(pylark.PartUploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_finish_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_file(pylark.FinishUploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_download_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_media(
pylark.DownloadDriveMediaReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_media(pylark.UploadDriveMediaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_prepare_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_media(
pylark.PrepareUploadDriveMediaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_part_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_media(pylark.PartUploadDriveMediaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_finish_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_media(
pylark.FinishUploadDriveMediaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission_old(
pylark.CreateDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_transfer_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.transfer_drive_member_permission(
pylark.TransferDriveMemberPermissionReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_member_permission_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_member_permission_list(
pylark.GetDriveMemberPermissionListReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission(
pylark.CreateDriveMemberPermissionReq(
token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission_old(
pylark.DeleteDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission(
pylark.DeleteDriveMemberPermissionReq(
token="x",
member_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission_old(
pylark.UpdateDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission(
pylark.UpdateDriveMemberPermissionReq(
token="x",
member_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_check_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.check_drive_member_permission(
pylark.CheckDriveMemberPermissionReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_public_permission_v1_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v1_old(
pylark.UpdateDrivePublicPermissionV1OldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_public_permission_v2_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v2_old(
pylark.UpdateDrivePublicPermissionV2OldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_public_permission_v2(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_public_permission_v2(
pylark.GetDrivePublicPermissionV2Req()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_public_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission(
pylark.UpdateDrivePublicPermissionReq(
token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_get_drive_media_tmp_download_url(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_drive_media_tmp_download_url(
pylark.BatchGetDriveMediaTmpDownloadURLReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_comment_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment_list(
pylark.GetDriveCommentListReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment(
pylark.GetDriveCommentReq(
file_token="x",
comment_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_comment(
pylark.CreateDriveCommentReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment(
pylark.UpdateDriveCommentReq(
file_token="x",
comment_id="x",
reply_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_comment(
pylark.DeleteDriveCommentReq(
file_token="x",
comment_id="x",
reply_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_drive_comment_patch(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment_patch(
pylark.UpdateDriveCommentPatchReq(
file_token="x",
comment_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_doc(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_doc(pylark.CreateDriveDocReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_doc_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_content(
pylark.GetDriveDocContentReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_doc_raw_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_raw_content(
pylark.GetDriveDocRawContentReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_doc_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_meta(
pylark.GetDriveDocMetaReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet(pylark.CreateSheetReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_meta(
pylark.GetSheetMetaReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_property(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_property(
pylark.UpdateSheetPropertyReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_update_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_sheet(
pylark.BatchUpdateSheetReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_import_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.import_sheet(pylark.ImportSheetReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_import_task(pylark.CreateDriveImportTaskReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_import_task(
pylark.GetDriveImportTaskReq(
ticket="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_move_sheet_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_sheet_dimension(
pylark.MoveSheetDimensionReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_prepend_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepend_sheet_value(
pylark.PrependSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_append_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.append_sheet_value(
pylark.AppendSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_insert_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.insert_sheet_dimension_range(
pylark.InsertSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_add_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_sheet_dimension_range(
pylark.AddSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_dimension_range(
pylark.UpdateSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_dimension_range(
pylark.DeleteSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_value(
pylark.GetSheetValueReq(
spreadsheet_token="x",
range_="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_sheet_value(
pylark.BatchGetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value(
pylark.SetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_value(
pylark.BatchSetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_style(
pylark.SetSheetStyleReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_style(
pylark.BatchSetSheetStyleReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_merge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.merge_sheet_cell(
pylark.MergeSheetCellReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_unmerge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.unmerge_sheet_cell(
pylark.UnmergeSheetCellReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_set_sheet_value_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value_image(
pylark.SetSheetValueImageReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_find_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.find_sheet(
pylark.FindSheetReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_replace_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.replace_sheet(
pylark.ReplaceSheetReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_condition_format(
pylark.CreateSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_condition_format(
pylark.GetSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_condition_format(
pylark.UpdateSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_condition_format(
pylark.DeleteSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_protected_dimension(
pylark.CreateSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_protected_dimension(
pylark.GetSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_protected_dimension(
pylark.UpdateSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_protected_dimension(
pylark.DeleteSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_data_validation_dropdown(
pylark.CreateSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_data_validation_dropdown(
pylark.DeleteSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_data_validation_dropdown(
pylark.UpdateSheetDataValidationDropdownReq(
spreadsheet_token="x",
sheet_id="x",
data_validation_id=1,
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_data_validation_dropdown(
pylark.GetSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter(
pylark.CreateSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter(
pylark.DeleteSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter(
pylark.UpdateSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter(
pylark.GetSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view(
pylark.CreateSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view(
pylark.DeleteSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view(
pylark.UpdateSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view(
pylark.GetSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_query_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view(
pylark.QuerySheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view_condition(
pylark.CreateSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view_condition(
pylark.DeleteSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view_condition(
pylark.UpdateSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view_condition(
pylark.GetSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_query_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view_condition(
pylark.QuerySheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_float_image(
pylark.CreateSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_float_image(
pylark.DeleteSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_float_image(
pylark.UpdateSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_float_image(
pylark.GetSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_query_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_float_image(
pylark.QuerySheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_wiki_space_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space_list(pylark.GetWikiSpaceListReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_wiki_space(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space(
pylark.GetWikiSpaceReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_wiki_space_setting(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_wiki_space_setting(
pylark.UpdateWikiSpaceSettingReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_add_wiki_space_member(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_wiki_space_member(
pylark.AddWikiSpaceMemberReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_wiki_node(
pylark.CreateWikiNodeReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_wiki_node_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node_list(
pylark.GetWikiNodeListReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node(pylark.GetWikiNodeReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_move_docs_to_wiki(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_docs_to_wiki(
pylark.MoveDocsToWikiReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
# real request (issued with an app that lacks permission, so every call should fail)
class TestDriveSampleRealRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDriveSampleRealRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_no_permission.ins()
self.module_cli = self.cli.drive
def test_real_request_get_drive_file_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_meta(pylark.GetDriveFileMetaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_file(
pylark.CreateDriveFileReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_copy_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.copy_drive_file(
pylark.CopyDriveFileReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_file(
pylark.DeleteDriveFileReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_drive_sheet_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_sheet_file(
pylark.DeleteDriveSheetFileReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_folder(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_folder(
pylark.CreateDriveFolderReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_meta(
pylark.GetDriveFolderMetaReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_root_folder_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_root_folder_meta(
pylark.GetDriveRootFolderMetaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_folder_children(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_folder_children(
pylark.GetDriveFolderChildrenReq(
folder_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_file_statistics(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_file_statistics(
pylark.GetDriveFileStatisticsReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_download_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_file(
pylark.DownloadDriveFileReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_file(pylark.UploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_prepare_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_file(
pylark.PrepareUploadDriveFileReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_part_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_file(pylark.PartUploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_finish_upload_drive_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_file(pylark.FinishUploadDriveFileReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_download_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.download_drive_media(
pylark.DownloadDriveMediaReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.upload_drive_media(pylark.UploadDriveMediaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_prepare_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepare_upload_drive_media(
pylark.PrepareUploadDriveMediaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_part_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.part_upload_drive_media(pylark.PartUploadDriveMediaReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_finish_upload_drive_media(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.finish_upload_drive_media(
pylark.FinishUploadDriveMediaReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission_old(
pylark.CreateDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_transfer_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.transfer_drive_member_permission(
pylark.TransferDriveMemberPermissionReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_member_permission_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_member_permission_list(
pylark.GetDriveMemberPermissionListReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_member_permission(
pylark.CreateDriveMemberPermissionReq(
token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission_old(
pylark.DeleteDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_member_permission(
pylark.DeleteDriveMemberPermissionReq(
token="x",
member_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_member_permission_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission_old(
pylark.UpdateDriveMemberPermissionOldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_member_permission(
pylark.UpdateDriveMemberPermissionReq(
token="x",
member_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_check_drive_member_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.check_drive_member_permission(
pylark.CheckDriveMemberPermissionReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_public_permission_v1_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v1_old(
pylark.UpdateDrivePublicPermissionV1OldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_public_permission_v2_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission_v2_old(
pylark.UpdateDrivePublicPermissionV2OldReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_public_permission_v2(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_public_permission_v2(
pylark.GetDrivePublicPermissionV2Req()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_public_permission(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_public_permission(
pylark.UpdateDrivePublicPermissionReq(
token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_get_drive_media_tmp_download_url(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_drive_media_tmp_download_url(
pylark.BatchGetDriveMediaTmpDownloadURLReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_comment_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment_list(
pylark.GetDriveCommentListReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_comment(
pylark.GetDriveCommentReq(
file_token="x",
comment_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_comment(
pylark.CreateDriveCommentReq(
file_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment(
pylark.UpdateDriveCommentReq(
file_token="x",
comment_id="x",
reply_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_drive_comment(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_drive_comment(
pylark.DeleteDriveCommentReq(
file_token="x",
comment_id="x",
reply_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_drive_comment_patch(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_drive_comment_patch(
pylark.UpdateDriveCommentPatchReq(
file_token="x",
comment_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_doc(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_doc(pylark.CreateDriveDocReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_doc_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_content(
pylark.GetDriveDocContentReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_doc_raw_content(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_raw_content(
pylark.GetDriveDocRawContentReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_doc_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_doc_meta(
pylark.GetDriveDocMetaReq(
doc_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet(pylark.CreateSheetReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_meta(
pylark.GetSheetMetaReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_property(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_property(
pylark.UpdateSheetPropertyReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_update_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_sheet(
pylark.BatchUpdateSheetReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_import_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.import_sheet(pylark.ImportSheetReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_drive_import_task(pylark.CreateDriveImportTaskReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_drive_import_task(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_drive_import_task(
pylark.GetDriveImportTaskReq(
ticket="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_move_sheet_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_sheet_dimension(
pylark.MoveSheetDimensionReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_prepend_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.prepend_sheet_value(
pylark.PrependSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_append_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.append_sheet_value(
pylark.AppendSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_insert_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.insert_sheet_dimension_range(
pylark.InsertSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_add_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_sheet_dimension_range(
pylark.AddSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_dimension_range(
pylark.UpdateSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_dimension_range(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_dimension_range(
pylark.DeleteSheetDimensionRangeReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_value(
pylark.GetSheetValueReq(
spreadsheet_token="x",
range_="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_get_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_get_sheet_value(
pylark.BatchGetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value(
pylark.SetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_set_sheet_value(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_value(
pylark.BatchSetSheetValueReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_style(
pylark.SetSheetStyleReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_set_sheet_style(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_set_sheet_style(
pylark.BatchSetSheetStyleReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_merge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.merge_sheet_cell(
pylark.MergeSheetCellReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_unmerge_sheet_cell(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.unmerge_sheet_cell(
pylark.UnmergeSheetCellReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_set_sheet_value_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.set_sheet_value_image(
pylark.SetSheetValueImageReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_find_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.find_sheet(
pylark.FindSheetReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_replace_sheet(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.replace_sheet(
pylark.ReplaceSheetReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_condition_format(
pylark.CreateSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_condition_format(
pylark.GetSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_condition_format(
pylark.UpdateSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_condition_format(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_condition_format(
pylark.DeleteSheetConditionFormatReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_protected_dimension(
pylark.CreateSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_protected_dimension(
pylark.GetSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_protected_dimension(
pylark.UpdateSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_protected_dimension(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_protected_dimension(
pylark.DeleteSheetProtectedDimensionReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_data_validation_dropdown(
pylark.CreateSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_data_validation_dropdown(
pylark.DeleteSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_data_validation_dropdown(
pylark.UpdateSheetDataValidationDropdownReq(
spreadsheet_token="x",
sheet_id="x",
data_validation_id=1,
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_data_validation_dropdown(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_data_validation_dropdown(
pylark.GetSheetDataValidationDropdownReq(
spreadsheet_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter(
pylark.CreateSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter(
pylark.DeleteSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter(
pylark.UpdateSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_filter(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter(
pylark.GetSheetFilterReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view(
pylark.CreateSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view(
pylark.DeleteSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view(
pylark.UpdateSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view(
pylark.GetSheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_query_sheet_filter_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view(
pylark.QuerySheetFilterViewReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_filter_view_condition(
pylark.CreateSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_filter_view_condition(
pylark.DeleteSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_filter_view_condition(
pylark.UpdateSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_filter_view_condition(
pylark.GetSheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
condition_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_query_sheet_filter_view_condition(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_filter_view_condition(
pylark.QuerySheetFilterViewConditionReq(
spreadsheet_token="x",
sheet_id="x",
filter_view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_sheet_float_image(
pylark.CreateSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_sheet_float_image(
pylark.DeleteSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_sheet_float_image(
pylark.UpdateSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_sheet_float_image(
pylark.GetSheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
float_image_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_query_sheet_float_image(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.query_sheet_float_image(
pylark.QuerySheetFloatImageReq(
spreadsheet_token="x",
sheet_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_wiki_space_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space_list(pylark.GetWikiSpaceListReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_wiki_space(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_space(
pylark.GetWikiSpaceReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_wiki_space_setting(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_wiki_space_setting(
pylark.UpdateWikiSpaceSettingReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_add_wiki_space_member(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.add_wiki_space_member(
pylark.AddWikiSpaceMemberReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_wiki_node(
pylark.CreateWikiNodeReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_wiki_node_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node_list(
pylark.GetWikiNodeListReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_wiki_node(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_wiki_node(pylark.GetWikiNodeReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_move_docs_to_wiki(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.move_docs_to_wiki(
pylark.MoveDocsToWikiReq(
space_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
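# The real-request tests assert only the exception type and a positive code
# (no message check), since error messages from a live API vary. A
# hypothetical pytest.mark.parametrize rewrite (assuming a "module_cli"
# fixture, which the original suite does not define) could collapse the
# repetition:
#
#     @pytest.mark.parametrize("method,req", [
#         ("get_wiki_node", pylark.GetWikiNodeReq()),
#         ("create_sheet", pylark.CreateSheetReq()),
#     ])
#     def test_real_request_fails(module_cli, method, req):
#         with pytest.raises(pylark.PyLarkError) as e:
#             getattr(module_cli, method)(req)
#         assert e.type is pylark.PyLarkError
#         assert e.value.code > 0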
# --- end of record: pylark drive test suite (per-record quality metrics omitted) ---
# --- next record: tests/commands/test_cloud.py, repo pm3310/sagify (MIT),
#     commit 79de19e938414a4d0de687e1d3d443711314d9d2 ---
try:
from unittest.mock import patch
except ImportError:
from mock import patch
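# (fallback for Python 2, where unittest.mock is unavailable and the
# external "mock" backport provides patch)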
from click.testing import CliRunner
import sagify
from sagify.config.config import Config
from sagify.__main__ import cli
class TestUploadData(object):
def test_upload_data_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
instance.upload_data.return_value = 's3://path-to-data/data/'
with runner.isolated_filesystem():
runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
result = runner.invoke(
cli=cli,
args=[
'cloud', 'upload-data',
'-i', 'input_data/',
'-s', 's3://path-to-data'
]
)
instance.upload_data.assert_called_with('input_data/', 's3://path-to-data')
assert result.exit_code == 0
def test_upload_data_with_dir_arg_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
instance.upload_data.return_value = 's3://path-to-data/data/'
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'upload-data',
'-d',
'src/',
'-i', 'input_data/',
'-s', 's3://path-to-data'
]
)
instance.upload_data.assert_called_with('input_data/', 's3://path-to-data')
assert result.exit_code == 0
def test_upload_data_with_invalid_dir_arg_fail_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
instance.upload_data.return_value = 's3://path-to-data/data/'
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'upload-data',
'-d',
'invalid_dir/',
'-i', 'input_data/',
'-s', 's3://path-to-data'
]
)
assert instance.upload_data.call_count == 0
assert result.exit_code == -1
class TestTrain(object):
def test_train_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
result = runner.invoke(
cli=cli,
args=[
'cloud', 'train',
'-i', 's3://bucket/input',
'-o', 's3://bucket/output',
'-e', 'ml.c4.2xlarge'
]
)
assert instance.train.call_count == 1
instance.train.assert_called_with(
image_name='sagemaker-img',
input_s3_data_location='s3://bucket/input',
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
train_volume_size=30,
train_max_run=24 * 60 * 60,
output_path='s3://bucket/output',
hyperparameters=None
)
assert result.exit_code == 0
def test_train_with_dir_arg_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'train',
'-d',
'src/',
'-i', 's3://bucket/input',
'-o', 's3://bucket/output',
'-e', 'ml.c4.2xlarge'
]
)
assert instance.train.call_count == 1
instance.train.assert_called_with(
image_name='sagemaker-img',
input_s3_data_location='s3://bucket/input',
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
train_volume_size=30,
train_max_run=24 * 60 * 60,
output_path='s3://bucket/output',
hyperparameters=None
)
assert result.exit_code == 0
def test_train_with_invalid_dir_arg_fail_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'train',
'-d',
'invalid_dir/',
'-i', 's3://bucket/input',
'-o', 's3://bucket/output',
'-e', 'ml.c4.2xlarge'
]
)
assert not instance.train.called
assert result.exit_code == -1
class TestDeploy(object):
def test_deploy_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
result = runner.invoke(
cli=cli,
args=[
'cloud', 'deploy',
'-m', 's3://bucket/model/location/model.tar.gz',
'-n', '2',
'-e', 'ml.c4.2xlarge'
]
)
assert instance.deploy.call_count == 1
instance.deploy.assert_called_with(
image_name='sagemaker-img',
s3_model_location='s3://bucket/model/location/model.tar.gz',
train_instance_count=2,
train_instance_type='ml.c4.2xlarge'
)
assert result.exit_code == 0
def test_deploy_with_dir_arg_happy_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'deploy',
'-d',
'src/',
'-m', 's3://bucket/model/location/model.tar.gz',
'-n', '2',
'-e', 'ml.c4.2xlarge'
]
)
assert instance.deploy.call_count == 1
instance.deploy.assert_called_with(
image_name='sagemaker-img',
s3_model_location='s3://bucket/model/location/model.tar.gz',
train_instance_count=2,
train_instance_type='ml.c4.2xlarge'
)
assert result.exit_code == 0
def test_deploy_with_invalid_dir_arg_fail_case(self):
runner = CliRunner()
with patch(
'sagify.commands.initialize._get_local_aws_profiles',
return_value=['default', 'sagify']
):
with patch.object(
sagify.config.config.ConfigManager,
'get_config',
lambda _: Config(
image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
)
):
with patch(
'sagify.sagemaker.sagemaker.SageMakerClient'
) as mocked_sage_maker_client:
instance = mocked_sage_maker_client.return_value
with runner.isolated_filesystem():
runner.invoke(
cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
)
result = runner.invoke(
cli=cli,
args=[
'cloud', 'deploy',
'-d',
'invalid_dir/',
'-m', 's3://bucket/model/location/model.tar.gz',
'-n', '2',
'-e', 'ml.c4.2xlarge'
]
)
assert not instance.deploy.called
assert result.exit_code == -1
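# A hedged sketch (not from the original module): the triply nested
# `with patch(...)` blocks repeated in every test above could be flattened
# with contextlib.ExitStack. `_patched_cli` is a hypothetical helper name
# introduced here for illustration only.
from contextlib import ExitStack

def _patched_cli():
    stack = ExitStack()
    stack.enter_context(patch(
        'sagify.commands.initialize._get_local_aws_profiles',
        return_value=['default', 'sagify']
    ))
    stack.enter_context(patch.object(
        sagify.config.config.ConfigManager,
        'get_config',
        lambda _: Config(
            image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
        )
    ))
    mocked = stack.enter_context(patch('sagify.sagemaker.sagemaker.SageMakerClient'))
    # Caller closes the patches with `with stack: ...` or stack.close().
    return stack, mocked.return_value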
| 41.662234
| 99
| 0.418449
| 1,280
| 15,665
| 4.879688
| 0.088281
| 0.038905
| 0.043228
| 0.060519
| 0.951169
| 0.950208
| 0.936599
| 0.933077
| 0.929395
| 0.929395
| 0
| 0.014883
| 0.485286
| 15,665
| 375
| 100
| 41.773333
| 0.759767
| 0
| 0
| 0.839763
| 0
| 0
| 0.163869
| 0.084073
| 0
| 0
| 0
| 0
| 0.065282
| 1
| 0.026706
| false
| 0
| 0.020772
| 0
| 0.05638
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a6847afb9f867644d48e0218625f0dd5bd6e0330
| 13,700
|
py
|
Python
|
deprecated/version1/utility.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | null | null | null |
deprecated/version1/utility.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | 1
|
2020-10-21T18:15:46.000Z
|
2020-10-21T18:15:46.000Z
|
deprecated/version1/utility.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 19:15:04 2018
@author: yuluo
"""
import subprocess
class Utility(object):
def __init__(self, debug=False):
"""
initializes the utility class for awscm
:param debug: enables debug information to be printed
"""
self.debug = debug
self.default_path_aws = '/home/ubuntu/'
def get_instance(self, instance):
"""
get the content of the labeled or named instance
:param instance: the key-value pair of the instance information
:return instance: the detailed value of the instance
"""
title = list(instance.keys())[0]
instance = instance.get(title)
return instance
def copy_file(self, instance, file, where):
# runnable for aws
"""
copy the file from local into the instance
:param instance: the instance that we want to access
:param file: the file path that we want to copy to the instance
:param where: the destination of the copied file
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["scp", "-i", key, file, username + ":" + self.default_path_aws + where])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(["scp", "-i", key, file, username + ":" + self.default_path_aws + where])
return "Successfully copied the file " + file + " to " + self.default_path_aws + where
except Exception:
return "Failed to access the instance"
def copy_folder(self, instance, folder, where):
# runnable for aws
"""
copy the folder from local into the instance
:param instance: the instance that we want to access
:param folder: the folder path that we want to copy to the instance
:param where: the destination of the copied file
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["scp", "-i", key, "-r", folder, username + ":" + self.default_path_aws + where])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(
["scp", "-i", key, "-r", folder, username + ":" + self.default_path_aws + where])
return "Successfully copied the folder " + folder + " to " + self.default_path_aws + where
except Exception:
return "Failed to access the instance"
def dir_list(self, instance, where):
"""
list objects from the instance directory
:param instance: the instance we want to access
:param where: the directory that we want to view
:return output: the list of objects
"""
instance = self.get_instance(instance)
output = ''
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
output = subprocess.check_output(["ssh", "-i", key, username, 'ls', self.default_path_aws + where]).decode(
"utf-8")
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
output = subprocess.check_output(
["ssh", "-i", key, username, 'ls', self.default_path_aws + where]).decode("utf-8")
return output
except Exception:
return "Failed to access the instance"
def delete_file(self, instance, file, where):
"""
delete the file from the instance
:param instance: the instance that we want to access
:param file: the file name that we want to delete
:param where: the destination of the deleted file
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["ssh", "-i", key, username, 'rm', self.default_path_aws + where + file])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(["ssh", "-i", key, username, 'rm', self.default_path_aws + where + file])
return "Successfully deleted the file " + file + " from " + self.default_path_aws + where
except Exception:
return "Failed to access the instance"
def delete_folder(self, instance, folder, where):
"""
delete the folder from the instance
:param instance: the instance that we want to access
:param folder: the folder name that we want to delete
:param where: the destination of the deleted folder
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["ssh", "-i", key, username, 'rm', '-r', self.default_path_aws + where + folder])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(
["ssh", "-i", key, username, 'rm', '-r', self.default_path_aws + where + folder])
return "Successfully deleted the folder " + folder + " from " + self.default_path_aws + where
except Exception:
return "Failed to access the instance"
def create_folder(self, instance, folder, where):
"""
create a folder in the instance
:param instance: the instance that we want to access
:param folder: the name of created folder
:param where: the destination location in the remote instance
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["ssh", "-i", key, username, 'mkdir', self.default_path_aws + where + folder])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(["ssh", "-i", key, username, 'mkdir', self.default_path_aws + where + folder])
return "Successfully created the folder " + folder + " in " + self.default_path_aws + where
except Exception:
return "Failed to access the instance"
def read_file(self, instance, file, where):
"""
read file from the instance
:param instance: the instance that we want to access
:param file: the file name that we want to read
:param where: the location of the file in the instance
:return output: the content of file
"""
instance = self.get_instance(instance)
output = ""
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
output = subprocess.check_output(
["ssh", "-i", key, username, 'cat', self.default_path_aws + where + file]).decode("utf-8")
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
output = subprocess.check_output(
["ssh", "-i", key, username, 'cat', self.default_path_aws + where + file]).decode("utf-8")
return output
except Exception:
return "Failed to access the instance"
def download_file(self, instance, file, where, local):
"""
download file from instance to local
:param instance: the instance that we want to access
:param file: the file name that we want to download
:param where: the directory path of the file in the instance
:param local: the local destination that we want to save the file
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(["scp", "-i", key, username + ":" + self.default_path_aws + where + file, local])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(
["scp", "-i", key, username + ':' + self.default_path_aws + where + file, local])
return "Successfully downloaded the file " + self.default_path_aws + where + file + " to " + local
except Exception:
return "Failed to access the instance"
def download_folder(self, instance, folder, where, local):
"""
download folder from instance to local
:param instance: the instance that we want to access
:param folder: the folder name that we want to download
:param where: the directory path of the folder in the instance
:param local: the local destination that we want to save the folder
:return: "Success" or "Fail"
"""
instance = self.get_instance(instance)
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
subprocess.check_output(
["scp", "-i", key, '-r', username + ":" + self.default_path_aws + where + folder, local])
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
subprocess.check_output(
["scp", "-i", key, '-r', username + ':' + self.default_path_aws + where + folder, local])
return "Successfully downloaded the folder " + self.default_path_aws + where + folder + " to " + local
except Exception:
return "Failed to access the instance"
def check_process(self, instance, process):
"""
check whether the process is running or not
:param instance: the instance that we want to access
:param process: the process name
:return output: the information of the running process
"""
instance = self.get_instance(instance)
output = ""
try:
if instance.get('address'):
username = instance.get('credentials').get('username') + "@" + instance.get('address')
key = instance.get('credentials').get('publickey')
output = subprocess.check_output(["ssh", "-i", key, username, 'ps', 'aux', '|', 'grep', process]).decode(
"utf-8")
else:
username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
key = instance.get('credentials').get('EC2_SECRET_KEY')
output = subprocess.check_output(
["ssh", '-i', key, username, 'ps', 'aux', '|', 'grep', process]).decode("utf-8")
return output
except Exception:
return "Failed to access the instance"
| 45.51495
| 118
| 0.564161
| 1,526
| 13,700
| 4.975098
| 0.080603
| 0.089831
| 0.115911
| 0.131718
| 0.863936
| 0.82205
| 0.804663
| 0.800053
| 0.793862
| 0.752766
| 0
| 0.004317
| 0.306788
| 13,700
| 300
| 119
| 45.666667
| 0.795093
| 0.241898
| 0
| 0.711656
| 0
| 0
| 0.183338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07362
| false
| 0
| 0.006135
| 0
| 0.214724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a6c3f96b7909d2e2755a500bcd6ce3c2ca94c43c
| 11,416
|
py
|
Python
|
template/tests/load_dat.py
|
ajmaurais/peptide_analyzer
|
62f37d88fefd0a8cfb57a8c157cfc85692956360
|
[
"MIT"
] | null | null | null |
template/tests/load_dat.py
|
ajmaurais/peptide_analyzer
|
62f37d88fefd0a8cfb57a8c157cfc85692956360
|
[
"MIT"
] | null | null | null |
template/tests/load_dat.py
|
ajmaurais/peptide_analyzer
|
62f37d88fefd0a8cfb57a8c157cfc85692956360
|
[
"MIT"
] | null | null | null |
import sys
import os
from collections import Counter
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
dat_std = pd.read_csv(os.path.dirname(os.path.abspath(__file__)) + '/data/std_output.tsv', sep='\t')
atom_counts = {'A': Counter({'C': 3,
'H': 5,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'C': Counter({'C': 5,
'H': 8,
'O': 2,
'N': 2,
'S': 1,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'D': Counter({'C': 4,
'H': 5,
'O': 3,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'E': Counter({'C': 5,
'H': 7,
'O': 3,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'F': Counter({'C': 9,
'H': 9,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'G': Counter({'C': 2,
'H': 3,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'H': Counter({'C': 6,
'H': 7,
'O': 1,
'N': 3,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'I': Counter({'C': 6,
'H': 11,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'K': Counter({'C': 6,
'H': 12,
'O': 1,
'N': 2,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'L': Counter({'C': 6,
'H': 11,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'M': Counter({'C': 5,
'H': 9,
'O': 1,
'N': 1,
'S': 1,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'N': Counter({'C': 4,
'H': 6,
'O': 2,
'N': 2,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'P': Counter({'C': 5,
'H': 7,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'Q': Counter({'C': 5,
'H': 8,
'O': 2,
'N': 2,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'R': Counter({'C': 6,
'H': 12,
'O': 1,
'N': 4,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'S': Counter({'C': 3,
'H': 5,
'O': 2,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'T': Counter({'C': 4,
'H': 7,
'O': 2,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'V': Counter({'C': 5,
'H': 9,
'O': 1,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'W': Counter({'C': 11,
'H': 10,
'O': 1,
'N': 2,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'Y': Counter({'C': 9,
'H': 9,
'O': 2,
'N': 1,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'U': Counter({'C': 5,
'H': 8,
'O': 2,
'N': 2,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 1,
'Cl': 0,
'Br': 0}),
'C_term': Counter({'C': 0,
'H': 1,
'O': 1,
'N': 0,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'N_term': Counter({'C': 0,
'H': 1,
'O': 0,
'N': 0,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0}),
'*': Counter({'C': 24,
'H': 36,
'O': 3,
'N': 6,
'S': 0,
'P': 0,
'(15)N': 0,
'(2)H': 0,
'(13)C': 0,
'Se': 0,
'Cl': 0,
'Br': 0})}
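# A hedged sketch (an addition, not in the original test data) of how a
# residue table like `atom_counts` is typically used: summing the per-residue
# Counters plus both termini gives a peptide's elemental composition. Note
# that Counter addition drops zero-valued entries.
def peptide_composition(sequence):
    total = Counter()
    for residue in sequence:
        total = total + atom_counts[residue]
    return total + atom_counts['N_term'] + atom_counts['C_term']

# For example, peptide_composition('ACDK') yields
# Counter({'H': 32, 'C': 18, 'O': 8, 'N': 6, 'S': 1}).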
| 38.053333
| 100
| 0.102313
| 739
| 11,416
| 1.56157
| 0.090663
| 0.045061
| 0.083189
| 0.103986
| 0.804159
| 0.792894
| 0.737435
| 0.657712
| 0.634315
| 0.634315
| 0
| 0.155398
| 0.765505
| 11,416
| 299
| 101
| 38.180602
| 0.275682
| 0
| 0
| 0.863946
| 0
| 0
| 0.059926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013605
| 0
| 0.013605
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
470eba693eedc484f3528cebad4ea8fde2a80a34
| 47
|
py
|
Python
|
Exercise/t4.py
|
Twenkid/Python-Various
|
cb6d704724d0e0325cf05a2f95b08cc892ff0857
|
[
"MIT"
] | null | null | null |
Exercise/t4.py
|
Twenkid/Python-Various
|
cb6d704724d0e0325cf05a2f95b08cc892ff0857
|
[
"MIT"
] | null | null | null |
Exercise/t4.py
|
Twenkid/Python-Various
|
cb6d704724d0e0325cf05a2f95b08cc892ff0857
|
[
"MIT"
] | null | null | null |
#t4.py
def kaka(a):
print(a*465546 + 2342)
| 11.75
| 23
| 0.595745
| 9
| 47
| 3.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297297
| 0.212766
| 47
| 4
| 23
| 11.75
| 0.459459
| 0.106383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
471ea69b41d7dcaee6304d49c046c2751ce16a2b
| 8,504
|
py
|
Python
|
model.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | 1
|
2021-07-15T18:47:02.000Z
|
2021-07-15T18:47:02.000Z
|
model.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | null | null | null |
model.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | null | null | null |
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import torchvision
import torch
from torchvision import models, datasets
class CRNN_Base(nn.Module):
def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
super(CRNN_Base, self).__init__()
input_shape = (c, h, w)
# CNN
self.bn0 = nn.BatchNorm2d(num_features=c)
self.pad1 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv1 = nn.Conv2d(c, filters[0], kernel_size=k, stride=1)
self.act1 = nn.ELU()
self.bn1 = nn.BatchNorm2d(num_features=filters[0])
self.maxPool1 = nn.MaxPool2d(kernel_size=poolings[0], stride=poolings[0])
self.dropout1 = nn.Dropout2d(dropout_rate)
self.pad2 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv2 = nn.Conv2d(filters[0], filters[1], kernel_size=k)
self.act2 = nn.ELU()
self.bn2 = nn.BatchNorm2d(num_features=filters[1])
self.maxPool2 = nn.MaxPool2d(kernel_size=poolings[1], stride=poolings[1])
self.dropout2 = nn.Dropout2d(dropout_rate)
self.pad3 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv3 = nn.Conv2d(filters[1], filters[2], kernel_size=k)
self.act3 = nn.ELU()
self.bn3 = nn.BatchNorm2d(num_features=filters[2])
self.maxPool3 = nn.MaxPool2d(kernel_size=poolings[2], stride=poolings[2])
self.dropout3 = nn.Dropout2d(dropout_rate)
self.pad4 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv4 = nn.Conv2d(filters[2], filters[3], kernel_size=k)
self.act4 = nn.ELU()
self.bn4 = nn.BatchNorm2d(num_features=filters[3])
self.maxPool4 = nn.MaxPool2d(kernel_size=poolings[3],stride=poolings[3])
self.dropout4 = nn.Dropout2d(dropout_rate)
# Output is (m, chan, freq, time) -> Needs to be reshaped for feeding to GRU units
# We will handle the reshape in the forward method
# RNN
self.gru = nn.GRU(input_size=256, hidden_size=gru_units, batch_first=True, num_layers=2, dropout=gru_dropout)
#self.gru2 = nn.GRU(input_size=32, hidden_size=32, batch_first=True, dropout=gru_dropout)
# Dense and softmax
self.dense1 = nn.Linear(gru_units, class_num)
self.softm = nn.Softmax(dim=-1)
def forward(self, x):
# CNN forward
x = self.bn0(x)
x = self.pad1(x)
x = self.conv1(x)
x = self.act1(x)
x = self.bn1(x)
x = self.maxPool1(x)
x = self.dropout1(x)
x = self.pad2(x)
x = self.conv2(x)
x = self.act2(x)
x = self.bn2(x)
x = self.maxPool2(x)
x = self.dropout2(x)
x = self.pad3(x)
x = self.conv3(x)
x = self.act3(x)
x = self.bn3(x)
x = self.maxPool3(x)
x = self.dropout3(x)
x = self.pad4(x)
x = self.conv4(x)
x = self.act4(x)
x = self.bn4(x)
x = self.maxPool4(x)
x = self.dropout4(x)
# Reshape
x = x.permute(0,3,2,1)
x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
# RNN forward
x = self.gru(x)[1][0]
# Dense and softmax forward
x = self.dense1(x)
x = self.softm(x)
return x
class CRNN_Larger(nn.Module):
def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
super(CRNN_Larger, self).__init__()
input_shape = (c, h, w)
# CNN
self.bn0 = nn.BatchNorm2d(num_features=c)
self.pad1 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv1 = nn.Conv2d(c, filters[0], kernel_size=k, stride=1)
self.act1 = nn.ELU()
self.bn1 = nn.BatchNorm2d(num_features=filters[0])
self.maxPool1 = nn.MaxPool2d(kernel_size=poolings[0], stride=poolings[0])
self.dropout1 = nn.Dropout2d(dropout_rate)
self.pad2 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv2 = nn.Conv2d(filters[0], filters[1], kernel_size=k)
self.act2 = nn.ELU()
self.bn2 = nn.BatchNorm2d(num_features=filters[1])
self.maxPool2 = nn.MaxPool2d(kernel_size=poolings[1], stride=poolings[1])
self.dropout2 = nn.Dropout2d(dropout_rate)
self.pad3 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv3 = nn.Conv2d(filters[1], filters[2], kernel_size=k)
self.act3 = nn.ELU()
self.bn3 = nn.BatchNorm2d(num_features=filters[2])
self.maxPool3 = nn.MaxPool2d(kernel_size=poolings[2], stride=poolings[2])
self.dropout3 = nn.Dropout2d(dropout_rate)
self.pad4 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv4 = nn.Conv2d(filters[2], filters[3], kernel_size=k)
self.act4 = nn.ELU()
self.bn4 = nn.BatchNorm2d(num_features=filters[3])
self.maxPool4 = nn.MaxPool2d(kernel_size=poolings[3],stride=poolings[3])
self.dropout4 = nn.Dropout2d(dropout_rate)
self.pad5 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
self.conv5 = nn.Conv2d(filters[3], filters[4], kernel_size=k)
self.act5 = nn.ELU()
self.bn5 = nn.BatchNorm2d(num_features=filters[4])
self.maxPool5 = nn.MaxPool2d(kernel_size=poolings[4],stride=poolings[4])
self.dropout5 = nn.Dropout2d(dropout_rate)
# Output is (m, chan, freq, time) -> Needs to be reshaped for feeding to GRU units
# We will handle the reshape in the forward method
# RNN
self.gru = nn.GRU(input_size=1024, hidden_size=gru_units, batch_first=True, num_layers=2, dropout=gru_dropout)
# Dense and softmax
self.dense1 = nn.Linear(gru_units, class_num)
self.softm = nn.Softmax(dim=-1)
def forward(self, x):
# CNN forward
x = self.bn0(x)
x = self.pad1(x)
x = self.conv1(x)
x = self.act1(x)
x = self.bn1(x)
x = self.maxPool1(x)
x = self.dropout1(x)
x = self.pad2(x)
x = self.conv2(x)
x = self.act2(x)
x = self.bn2(x)
x = self.maxPool2(x)
x = self.dropout2(x)
x = self.pad3(x)
x = self.conv3(x)
x = self.act3(x)
x = self.bn3(x)
x = self.maxPool3(x)
x = self.dropout3(x)
x = self.pad4(x)
x = self.conv4(x)
x = self.act4(x)
x = self.bn4(x)
x = self.maxPool4(x)
x = self.dropout4(x)
x = self.pad5(x)
x = self.conv5(x)
x = self.act5(x)
x = self.bn5(x)
x = self.maxPool5(x)
x = self.dropout5(x)
# Reshape
x = x.permute(0,3,2,1)
x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
# RNN forward
x = self.gru(x)[1][0]
# Dense and softmax forward
x = self.dense1(x)
x = self.softm(x)
return x
class CRNN_ResNet18(nn.Module):
def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
# Backbone
super(CRNN_ResNet18, self).__init__()
input_shape = (c, h, w)
self.backbone = torchvision.models.resnet18(pretrained=True)
modules = list(self.backbone.children())[:-1]
self.backbone = nn.Sequential(*modules)
ct = 0
for child in self.backbone.children():
ct += 1
if ct < 7:
for param in child.parameters():
param.requires_grad = False
# RNN
self.gru = nn.GRU(input_size=512, hidden_size=gru_units, batch_first=True, num_layers=3, dropout=gru_dropout)
#self.gru2 = nn.GRU(input_size=32, hidden_size=32, batch_first=True, dropout=gru_dropout)
# Dense and softmax
self.dense1 = nn.Linear(gru_units, class_num)
self.softm = nn.Softmax(dim=-1)
def forward(self, x):
# Backbone forward
x = self.backbone(x)
# Reshape
x = x.permute(0,3,2,1)
x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
# RNN forward
x = self.gru(x)[1][0]
# Dense and softmax forward
x = self.dense1(x)
x = self.softm(x)
return x
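# A hedged smoke-test sketch (an addition, not in the original file). The
# spectrogram shape and hyper-parameters are assumptions chosen so the
# flattened CNN features match the GRU's expected input_size of 256
# (the frequency axis collapses to 1 and filters[3] == 256).
if __name__ == '__main__':
    model = CRNN_Base(class_num=10, c=1, h=96, w=1366, k=3,
                      filters=[64, 128, 128, 256],
                      poolings=[(2, 2), (3, 3), (4, 4), (4, 4)],
                      dropout_rate=0.1)
    x = torch.randn(2, 1, 96, 1366)  # (batch, channels, mel bins, time frames)
    print(model(x).shape)  # torch.Size([2, 10])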
| 36.497854
| 111
| 0.577493
| 1,283
| 8,504
| 3.742011
| 0.110678
| 0.068736
| 0.071235
| 0.044991
| 0.864612
| 0.851281
| 0.851281
| 0.841908
| 0.834618
| 0.834618
| 0
| 0.054395
| 0.273636
| 8,504
| 233
| 112
| 36.497854
| 0.722843
| 0.081961
| 0
| 0.773256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034884
| false
| 0
| 0.046512
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ba7dd577c3e8828d8289625c9be21e83ca75ece
| 2,378
|
py
|
Python
|
api/migrations/0076_auto_20200728_1500.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 11
|
2018-06-11T06:05:12.000Z
|
2022-03-25T09:31:44.000Z
|
api/migrations/0076_auto_20200728_1500.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 498
|
2017-11-07T21:20:13.000Z
|
2022-03-31T14:37:18.000Z
|
api/migrations/0076_auto_20200728_1500.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 6
|
2018-04-11T13:29:50.000Z
|
2020-07-16T16:52:11.000Z
|
# Generated by Django 2.2.13 on 2020-07-28 15:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0075_profile_last_frontend_login'),
]
operations = [
migrations.RemoveField(
model_name='fieldreport',
name='cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_num_dead',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_num_dead',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_suspected_cases',
),
]
| 27.651163
| 52
| 0.543314
| 188
| 2,378
| 6.579787
| 0.207447
| 0.305578
| 0.378335
| 0.43654
| 0.86823
| 0.86823
| 0.831851
| 0.789006
| 0.205335
| 0.109943
| 0
| 0.013046
| 0.355341
| 2,378
| 85
| 53
| 27.976471
| 0.793868
| 0.019344
| 0
| 0.683544
| 1
| 0
| 0.233476
| 0.064807
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012658
| 0
| 0.050633
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f3290a43c48894b4555f25b583136c942a6ca761
| 13,539
|
py
|
Python
|
ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 2
|
2021-11-16T10:00:33.000Z
|
2021-12-13T02:57:40.000Z
|
ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | null | null | null |
ext/ANTsPyNet/antspynet/architectures/create_densenet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 1
|
2021-12-13T02:57:27.000Z
|
2021-12-13T02:57:27.000Z
|
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Dropout, BatchNormalization,
Activation, Dense, Concatenate,
Conv2D, Conv2DTranspose, AveragePooling2D, GlobalAveragePooling2D,
Conv3D, Conv3DTranspose, AveragePooling3D, GlobalAveragePooling3D)
from tensorflow.keras import initializers
from tensorflow.keras import regularizers
def create_densenet_model_2d(input_image_size,
number_of_classification_labels=1000,
number_of_filters=16,
depth=7,
number_of_dense_blocks=1,
growth_rate=12,
dropout_rate=0.2,
weight_decay=1e-4,
mode='classification'
):
"""
2-D implementation of the DenseNet deep learning architecture.
Creates a keras model of the DenseNet deep learning architecture for image
recognition based on the paper
G. Huang, Z. Liu, K. Weinberger, and L. van der Maaten. Densely Connected
Convolutional Networks
available here:
https://arxiv.org/abs/1608.06993
This particular implementation was influenced by the following python
implementation:
https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py
Arguments
---------
input_image_size : tuple of length 3
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : integer
Number of classification labels.
number_of_filters : integer
Number of filters.
depth : integer
Number of layers---must be equal to 3 * N + 4 where N is an integer (default = 7).
number_of_dense_blocks : integer
Number of dense blocks to add to the end (default = 1).
growth_rate : integer
Number of filters to add for each dense block layer (default = 12).
dropout_rate : scalar
Per drop out layer rate (default = 0.2).
weight_decay : scalar
Weight decay (default = 1e-4).
mode : string
'classification' or 'regression'. Default = 'classification'.
Returns
-------
Keras model
A 2-D Keras model defining the network.
Example
-------
>>> model = create_densenet_model_2d((128, 128, 1))
>>> model.summary()
"""
concatenation_axis = 1
if K.image_data_format() == 'channels_last':
concatenation_axis = -1
def convolution_factory_2d(model, number_of_filters, kernel_size=(3, 3),
dropout_rate=0.0, weight_decay=1e-4):
model = BatchNormalization(axis=concatenation_axis,
gamma_regularizer=regularizers.l2(weight_decay),
beta_regularizer=regularizers.l2(weight_decay))(model)
model = Activation(activation='relu')(model)
model = Conv2D(filters=number_of_filters,
kernel_size=kernel_size,
padding='same',
use_bias=False,
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(model)
if dropout_rate > 0.0:
model = Dropout(rate=dropout_rate)(model)
return(model)
def transition_2d(model, number_of_filters, dropout_rate=0.0, weight_decay=1e-4):
model = convolution_factory_2d(model, number_of_filters, kernel_size=(1, 1),
dropout_rate=dropout_rate, weight_decay=weight_decay)
model = AveragePooling2D(pool_size=(2, 2),
strides=(2, 2))(model)
return(model)
def create_dense_blocks_2d(model, number_of_filters, depth, growth_rate,
dropout_rate=0.0, weight_decay=1e-4):
dense_block_layers = [model]
for i in range(depth):
model = convolution_factory_2d(model, number_of_filters=growth_rate,
kernel_size=(3, 3), dropout_rate=dropout_rate,
weight_decay=weight_decay)
dense_block_layers.append(model)
model = Concatenate(axis=concatenation_axis)(dense_block_layers)
number_of_filters += growth_rate
return(model, number_of_filters)
if ((depth - 4) % 3) != 0:
raise ValueError('Depth must be equal to 3*N+4 where N is an integer.')
number_of_layers = int((depth - 4) / 3)
inputs = Input(shape = input_image_size)
outputs = Conv2D(filters=number_of_filters,
kernel_size=(3, 3),
kernel_initializer='he_uniform',
padding='same',
use_bias=False,
kernel_regularizer=regularizers.l2(weight_decay))(inputs)
# Add dense blocks
nFilters = number_of_filters
for i in range(number_of_dense_blocks - 1):
outputs, nFilters = \
create_dense_blocks_2d(outputs, number_of_filters=nFilters,
depth=number_of_layers, growth_rate=growth_rate,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs = transition_2d(outputs, number_of_filters=nFilters,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs, nFilters = \
create_dense_blocks_2d(outputs, number_of_filters=nFilters,
depth=number_of_layers, growth_rate=growth_rate,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs = BatchNormalization(axis=concatenation_axis,
gamma_regularizer=regularizers.l2(weight_decay),
beta_regularizer=regularizers.l2(weight_decay))(outputs)
outputs = Activation(activation='relu')(outputs)
outputs = GlobalAveragePooling2D()(outputs)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(units=number_of_classification_labels,
activation=layer_activation,
kernel_regularizer=regularizers.l2(weight_decay),
bias_regularizer=regularizers.l2(weight_decay))(outputs)
densenet_model = Model(inputs=inputs, outputs=outputs)
return(densenet_model)
def create_densenet_model_3d(input_image_size,
number_of_classification_labels=1000,
number_of_filters=16,
depth=7,
number_of_dense_blocks=1,
growth_rate=12,
dropout_rate=0.2,
weight_decay=1e-4,
mode='classification'
):
"""
3-D implementation of the DenseNet deep learning architecture.
Creates a keras model of the DenseNet deep learning architecture for image
recognition based on the paper
G. Huang, Z. Liu, K. Weinberger, and L. van der Maaten. Densely Connected
Convolutional Networks
available here:
https://arxiv.org/abs/1608.06993
This particular implementation was influenced by the following python
implementation:
https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py
Arguments
---------
input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : integer
Number of classification labels.
number_of_filters : integer
Number of filters.
depth : integer
Number of layers---must be equal to 3 * N + 4 where N is an integer (default = 7).
number_of_dense_blocks : integer
Number of dense blocks to add to the end (default = 1).
growth_rate : integer
Number of filters to add for each dense block layer (default = 12).
dropout_rate : scalar
Per drop out layer rate (default = 0.2).
weight_decay : scalar
Weight decay (default = 1e-4).
mode : string
'classification' or 'regression'. Default = 'classification'.
Returns
-------
Keras model
A 3-D Keras model defining the network.
Example
-------
>>> model = create_densenet_model_3d((128, 128, 128, 1))
>>> model.summary()
"""
concatenation_axis = 1
if K.image_data_format() == 'channels_last':
concatenation_axis = -1
def convolution_factory_3d(model, number_of_filters, kernel_size=(3, 3, 3),
dropout_rate=0.0, weight_decay=1e-4):
model = BatchNormalization(axis=concatenation_axis,
gamma_regularizer=regularizers.l2(weight_decay),
beta_regularizer=regularizers.l2(weight_decay))(model)
model = Activation(activation='relu')(model)
model = Conv3D(filters=number_of_filters,
kernel_size=kernel_size,
padding='same',
use_bias=False,
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(model)
if dropout_rate > 0.0:
model = Dropout(rate=dropout_rate)(model)
return(model)
def transition_3d(model, number_of_filters, dropout_rate=0.0, weight_decay=1e-4):
model = convolution_factory_3d(model, number_of_filters, kernel_size=(1, 1, 1),
dropout_rate=dropout_rate, weight_decay=weight_decay)
model = AveragePooling3D(pool_size=(2, 2, 2),
strides=(2, 2, 2))(model)
return(model)
def create_dense_blocks_3d(model, number_of_filters, depth, growth_rate,
dropout_rate=0.0, weight_decay=1e-4):
dense_block_layers = [model]
for i in range(depth):
model = convolution_factory_3d(model, number_of_filters=growth_rate,
kernel_size=(3, 3, 3), dropout_rate=dropout_rate,
weight_decay=weight_decay)
dense_block_layers.append(model)
model = Concatenate(axis=concatenation_axis)(dense_block_layers)
number_of_filters += growth_rate
return(model, number_of_filters)
if ((depth - 4) % 3) != 0:
raise ValueError('Depth must be equal to 3*N+4 where N is an integer.')
number_of_layers = int((depth - 4) / 3)
inputs = Input(shape = input_image_size)
outputs = Conv3D(filters=number_of_filters,
kernel_size=(3, 3, 3),
kernel_initializer='he_uniform',
padding='same',
use_bias=False,
kernel_regularizer=regularizers.l2(weight_decay))(inputs)
# Add dense blocks
nFilters = number_of_filters
for i in range(number_of_dense_blocks - 1):
outputs, nFilters = \
create_dense_blocks_3d(outputs, number_of_filters=nFilters,
depth=number_of_layers, growth_rate=growth_rate,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs = transition_3d(outputs, number_of_filters=nFilters,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs, nFilters = \
create_dense_blocks_3d(outputs, number_of_filters=nFilters,
depth=number_of_layers, growth_rate=growth_rate,
dropout_rate=dropout_rate, weight_decay=weight_decay)
outputs = BatchNormalization(axis=concatenation_axis,
gamma_regularizer=regularizers.l2(weight_decay),
beta_regularizer=regularizers.l2(weight_decay))(outputs)
outputs = Activation(activation='relu')(outputs)
outputs = GlobalAveragePooling3D()(outputs)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(units=number_of_classification_labels,
activation=layer_activation,
kernel_regularizer=regularizers.l2(weight_decay),
bias_regularizer=regularizers.l2(weight_decay))(outputs)
densenet_model = Model(inputs=inputs, outputs=outputs)
return(densenet_model)
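# A hedged usage sketch mirroring the docstring examples. The depth argument
# must satisfy depth = 3 * N + 4 for integer N, so depth=7 (N=1) is the
# smallest valid setting.
if __name__ == '__main__':
    model = create_densenet_model_2d((128, 128, 1),
                                     number_of_classification_labels=10,
                                     depth=7)
    model.summary()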
| 37.818436
| 104
| 0.602482
| 1,466
| 13,539
| 5.329468
| 0.133015
| 0.063484
| 0.065276
| 0.063484
| 0.937796
| 0.93434
| 0.933828
| 0.930756
| 0.921669
| 0.895559
| 0
| 0.023574
| 0.320112
| 13,539
| 357
| 105
| 37.92437
| 0.825204
| 0.237979
| 0
| 0.793296
| 0
| 0
| 0.038925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044693
| false
| 0
| 0.03352
| 0
| 0.078212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3436f7a9fc684a11402ed73af31d4c502f05580
| 9,023
|
py
|
Python
|
pyworkforce/shifts/tests/test_shifts.py
|
rodrigo-arenas/pyworkforce
|
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
|
[
"MIT"
] | 10
|
2021-03-20T02:58:52.000Z
|
2022-03-28T05:58:56.000Z
|
pyworkforce/shifts/tests/test_shifts.py
|
rodrigo-arenas/pyworkforce
|
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
|
[
"MIT"
] | 3
|
2021-03-13T02:11:39.000Z
|
2021-04-08T01:27:36.000Z
|
pyworkforce/shifts/tests/test_shifts.py
|
rodrigo-arenas/pyworkforce
|
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
|
[
"MIT"
] | 1
|
2022-01-04T11:06:47.000Z
|
2022-01-04T11:06:47.000Z
|
from pyworkforce.shifts import MinAbsDifference, MinRequiredResources
import pytest
def test_min_abs_difference_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinAbsDifference(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_infeasible_min_abs_difference_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinAbsDifference(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=10,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'INFEASIBLE'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert solution['cost'] == -1
assert len(solution['resources_shifts']) == 1
assert solution['resources_shifts'][0]['day'] == -1
assert solution['resources_shifts'][0]['shift'] == 'Unknown'
assert solution['resources_shifts'][0]['resources'] == -1
def test_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_cost_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
cost_dict = {"Morning": 8, "Afternoon": 8, "Night": 10, "Mixed": 7}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
cost_dict=cost_dict,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_wrong_cost_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
cost_dict = {"Morning": 8, "Night": 10, "Mixed": 7}
num_days = 2
with pytest.raises(Exception) as excinfo:
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
cost_dict=cost_dict,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert str(excinfo.value) == "cost_dict must have the same keys as shifts_coverage"
def test_infeasible_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'INFEASIBLE'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert solution['cost'] == -1
assert len(solution['resources_shifts']) == 1
assert solution['resources_shifts'][0]['day'] == -1
assert solution['resources_shifts'][0]['shift'] == 'Unknown'
assert solution['resources_shifts'][0]['resources'] == -1
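# A hedged sketch (an addition, not part of the original suite) showing how
# the solver output asserted above is typically consumed: tabulating scheduled
# resources per day and shift. `_demo_solution` is a hand-written stand-in,
# not real solver output.
def tabulate_resources(solution):
    table = {}
    for row in solution['resources_shifts']:
        table.setdefault(row['day'], {})[row['shift']] = row['resources']
    return table

_demo_solution = {
    'status': 'OPTIMAL',
    'cost': 100,
    'resources_shifts': [
        {'day': 0, 'shift': 'Morning', 'resources': 12},
        {'day': 0, 'shift': 'Night', 'resources': 7},
    ],
}
assert tabulate_resources(_demo_solution) == {0: {'Morning': 12, 'Night': 7}}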
| 49.850829
| 109
| 0.477557
| 1,392
| 9,023
| 2.990661
| 0.056034
| 0.164305
| 0.21619
| 0.247898
| 0.946193
| 0.946193
| 0.946193
| 0.946193
| 0.937785
| 0.937785
| 0
| 0.197612
| 0.359526
| 9,023
| 180
| 110
| 50.127778
| 0.522755
| 0
| 0
| 0.868056
| 0
| 0
| 0.080794
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.041667
| false
| 0
| 0.013889
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f34466e2623ecc3731ee9a82d535b049123cd382
| 4,253
|
py
|
Python
|
accelbyte_py_sdk/api/eventlog/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/eventlog/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/eventlog/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-event-log-service."""
__version__ = ""
__author__ = "AccelByte"
__email__ = "dev@accelbyte.net"
# pylint: disable=line-too-long
# event
from .wrappers import get_event_by_event_id_handler
from .wrappers import get_event_by_event_id_handler_async
from .wrappers import get_event_by_event_type_and_event_id_handler
from .wrappers import get_event_by_event_type_and_event_id_handler_async
from .wrappers import get_event_by_event_type_handler
from .wrappers import get_event_by_event_type_handler_async
from .wrappers import get_event_by_namespace_handler
from .wrappers import get_event_by_namespace_handler_async
from .wrappers import get_event_by_user_event_id_and_event_type_handler
from .wrappers import get_event_by_user_event_id_and_event_type_handler_async
from .wrappers import get_event_by_user_id_and_event_id_handler
from .wrappers import get_event_by_user_id_and_event_id_handler_async
from .wrappers import get_event_by_user_id_and_event_type_handler
from .wrappers import get_event_by_user_id_and_event_type_handler_async
from .wrappers import get_event_by_user_id_handler
from .wrappers import get_event_by_user_id_handler_async
from .wrappers import post_event_handler
from .wrappers import post_event_handler_async
# event_descriptions
from .wrappers import agent_type_description_handler
from .wrappers import agent_type_description_handler_async
from .wrappers import event_id_description_handler
from .wrappers import event_id_description_handler_async
from .wrappers import event_level_description_handler
from .wrappers import event_level_description_handler_async
from .wrappers import event_type_description_handler
from .wrappers import event_type_description_handler_async
from .wrappers import specific_agent_type_description_handler
from .wrappers import specific_agent_type_description_handler_async
from .wrappers import specific_event_id_description_handler
from .wrappers import specific_event_id_description_handler_async
from .wrappers import specific_event_level_description_handler
from .wrappers import specific_event_level_description_handler_async
from .wrappers import specific_event_type_description_handler
from .wrappers import specific_event_type_description_handler_async
from .wrappers import specific_ux_description_handler
from .wrappers import specific_ux_description_handler_async
from .wrappers import ux_name_description_handler
from .wrappers import ux_name_description_handler_async
# event_registry
from .wrappers import get_registered_event_id_handler
from .wrappers import get_registered_event_id_handler_async
from .wrappers import get_registered_events_by_event_type_handler
from .wrappers import get_registered_events_by_event_type_handler_async
from .wrappers import get_registered_events_handler
from .wrappers import get_registered_events_handler_async
from .wrappers import register_event_handler
from .wrappers import register_event_handler_async
from .wrappers import unregister_event_id_handler
from .wrappers import unregister_event_id_handler_async
from .wrappers import update_event_registry_handler
from .wrappers import update_event_registry_handler_async
# event_v2
from .wrappers import get_event_specific_user_v2_handler
from .wrappers import get_event_specific_user_v2_handler_async
from .wrappers import get_public_edit_history
from .wrappers import get_public_edit_history_async
from .wrappers import get_user_events_v2_public
from .wrappers import get_user_events_v2_public_async
from .wrappers import query_event_stream_handler
from .wrappers import query_event_stream_handler_async
# user_information
from .wrappers import delete_user_activities_handler
from .wrappers import delete_user_activities_handler_async
from .wrappers import get_user_activities_handler
from .wrappers import get_user_activities_handler_async
from .wrappers import last_user_activity_time_handler
from .wrappers import last_user_activity_time_handler_async
| 47.255556
| 88
| 0.894663
| 627
| 4,253
| 5.553429
| 0.141946
| 0.220563
| 0.330844
| 0.180931
| 0.878231
| 0.875359
| 0.82826
| 0.564905
| 0.316485
| 0.225158
| 0
| 0.002297
| 0.078768
| 4,253
| 89
| 89
| 47.786517
| 0.886422
| 0.101575
| 0
| 0
| 1
| 0
| 0.006835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.955224
| 0
| 0.955224
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
f3524d7786f294f680962dddd5beb46c86fd8c5e
| 15,143
|
py
|
Python
|
experiments.py
|
joshsanz/learned_uncertainty
|
2103126105dbe44cfe75fc22291ba669c1a162f3
|
[
"MIT"
] | null | null | null |
experiments.py
|
joshsanz/learned_uncertainty
|
2103126105dbe44cfe75fc22291ba669c1a162f3
|
[
"MIT"
] | null | null | null |
experiments.py
|
joshsanz/learned_uncertainty
|
2103126105dbe44cfe75fc22291ba669c1a162f3
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
import time
from data_models import *
from prediction_models import *
from control_models import *
def error(predicted_return, true_return):
    """Signed prediction error per period."""
    return predicted_return - true_return
def get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
sampler = GaussianNoise(seed)
data = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
sampler_input = (true_asset_value, asset_covariance)
data[t] = sampler.sample(sampler_input)
return data
def get_wiener_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
steps = get_gaussian_data(num_samples, np.zeros((num_assets,)), asset_covariance, seed)
return np.cumsum(steps, axis=0) + true_asset_value
def get_real_data():
sampler = RealData()
return sampler.labels(), sampler.dates(), sampler.sample()
def get_returns(data, investment_strategies, asset_predictions):
num_samples = investment_strategies.shape[0]
predicted_return = np.zeros(shape=(num_samples,))
true_return = np.zeros(shape=(num_samples,))
for t in range(num_samples):
if t <= 2:
continue
observed_asset_value = data[t]
predicted_asset_value = asset_predictions[t]
investment_strategy = investment_strategies[t]
true_return[t] = investment_strategy.dot(observed_asset_value)
predicted_return[t] = investment_strategy.dot(predicted_asset_value)
return predicted_return, true_return
def run_gaussian_norm(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
regularization = control_params['regularization']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = NormModel(num_assets=num_assets, gamma=gamma, regularization=regularization)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_gaussian_covar(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = CovarianceModel(num_assets=num_assets, gamma=gamma)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_simple_gaussian_experiments(params, real_data=False, plot=False, seed=1):
if not real_data:
num_samples = 100
true_asset_value = params['asset_value']
asset_covariance = params['asset_covariance']
data = get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
else:
data_labels, data_dates, data = get_real_data()
print("date range:", data_dates[0][0], "-", data_dates[0][-1])
num_samples = data.shape[0]
gamma = params['gamma']
window = params['window']
num_assets = data.shape[1]
if plot:
if real_data:
for i in range(num_assets):
plt.plot(data.T[i], label=data_labels[i])
else:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.5)
# In final plots, predicted return may not be relevant.
# plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_ltv_gaussian_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
true_asset_delta = params['asset_delta']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
true_asset_value = true_asset_v0 + (true_asset_delta.T @ np.arange(0,num_samples).reshape(-1,1).T).T
data = get_gaussian_data(num_samples, np.zeros((3,)), asset_covariance, seed) + true_asset_value
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_wiener_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
data = get_wiener_data(num_samples, true_asset_v0, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
if __name__ == "__main__":
run_simple_gaussian_experiments(params={'gamma': 1,
'window': 10},
real_data=True,
plot=True, seed=int(time.time()))
run_simple_gaussian_experiments(params={'asset_value': np.array([0.8, 1.0, 1.1]),
'asset_covariance': np.diag([0.02, 0.01, 0.03]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_ltv_gaussian_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'asset_delta': np.array([[0.002, -0.003, 0.001]]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_wiener_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
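All three experiment runners return the same results dictionary shape ('true_values' plus one sub-dict per experiment holding 'predicted_return', 'true_return', 'strategies' and 'predicted_values'), so downstream analysis can be written once. A minimal consumer sketch; the parameter values are arbitrary examples:
results = run_wiener_experiments(params={'asset_value': np.array([1.0, 1.0, 1.0]),
                                         'asset_covariance': np.eye(3) * 0.01,
                                         'gamma': 1,
                                         'window': 10},
                                 plot=False, seed=42)
for name, res in results.items():
    if name == 'true_values':
        continue  # raw asset paths, not an experiment entry
    print(name, 'cumulative true return:', np.sum(res['true_return']))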
| 44.801775
| 111
| 0.606947
| 1,756
| 15,143
| 4.960137
| 0.090547
| 0.04248
| 0.034443
| 0.028932
| 0.86372
| 0.834328
| 0.80884
| 0.79093
| 0.779104
| 0.76349
| 0
| 0.015598
| 0.280262
| 15,143
| 337
| 112
| 44.934718
| 0.783558
| 0.074556
| 0
| 0.751908
| 0
| 0
| 0.100086
| 0.009866
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038168
| false
| 0
| 0.022901
| 0.003817
| 0.099237
| 0.015267
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
346dfc38e5228510fbb6d8575a372cd8d9bac798
| 9,074
|
py
|
Python
|
src/thex/apps/utils/signal_utils.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
src/thex/apps/utils/signal_utils.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
src/thex/apps/utils/signal_utils.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import plotly
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# -------------------- Graphing Functions --------------------
def single_chromosome_graph_line(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.line(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
line=dict(width=float(marker_width)),
)
return fig
def single_chromosome_graph_scatter(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.scatter(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
marker=dict(size=float(marker_width)),
)
return fig
def whole_genome_line(
df,
chromosomes,
samples,
colors,
marker_width,
template,
font_size,
y_max,
x_max,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
fig = make_subplots(
rows=len(chromosomes),
cols=1,
x_title="Position",
y_title="Edit Me!",
row_titles=chromosomes,
row_heights=[2]*len(chromosomes),
)
for n, sample in enumerate(samples):
legend_flag = True
for row, current_chromosome in enumerate(chromosomes, start=1):
filt = (df['Chromosome'] == current_chromosome) & (df["Sample"] == sample)
sample_chromosome_data = df[filt]
# Make figure
fig.add_trace(
go.Scatter(
x=sample_chromosome_data['Window'],
y=sample_chromosome_data['Value'],
mode='lines',
legendgroup=str(sample),
name=sample,
line=dict(
color=colors[n],
width=float(marker_width)
),
showlegend=legend_flag,
),
row=row,
col=1
)
legend_flag = False
continue
# --- Update Figure ---
fig.update_layout(
font=dict(size=font_size, family=font_family),
height=125*len(chromosomes),
hovermode="x unified",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='trace',
title="",
),
margin=dict(
l=60,
r=50,
b=60,
t=10,
),
template=template,
title_x=0.5,
font_family="Arial",
)
fig.update_xaxes(
fixedrange=True,
range=[0, x_max],
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0.0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
if annotation['text'] == "Edit Me!":
continue
annotation['textangle']=0
annotation['align']="center"
return fig
def whole_genome_scatter(
df,
chromosomes,
samples,
colors,
marker_width,
template,
font_size,
y_max,
x_max,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# fig = make_subplots(
# rows=len(chromosomes),
# cols=1,
# x_title="Position",
# y_title="Edit Me!",
# row_titles=chromosomes,
# row_heights=[2]*len(chromosomes),
# )
# for n, sample in enumerate(samples):
# legend_flag = True
# for row, current_chromosome in enumerate(chromosomes, start=1):
# filt = (df['Chromosome'] == current_chromosome) & (df["Sample"] == sample)
# sample_chromosome_data = df[filt]
# # Make figure
# fig.add_trace(
# go.Scatter(
# x=sample_chromosome_data['Window'],
# y=sample_chromosome_data['Value'],
# mode='markers',
# legendgroup=str(sample),
# name=sample,
# line=dict(
# color=colors[n],
# width=float(marker_width)
# ),
# showlegend=legend_flag,
# ),
# row=row,
# col=1
# )
# legend_flag = False
# continue
fig = px.scatter(
df,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
# height=500,
facet_row="Chromosome",
)
# --- Update Figure ---
fig.update_layout(
font=dict(size=font_size, family=font_family),
height=125*len(chromosomes),
hovermode="x unified",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='trace',
title="",
),
margin=dict(
l=60,
r=50,
b=60,
t=10,
),
template=template,
title_x=0.5,
font_family=font_family,
)
fig.update_xaxes(
fixedrange=True,
range=[0, x_max],
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0.0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
title='',
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_traces(marker=dict(size=float(marker_width)))
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
if annotation['text'] == "Edit Me!":
continue
annotation['textangle']=0
annotation['align']="center"
return fig
# -------------------- File Validation --------------------
def validate_signal_tracer_headers(df):
    """Validate that headers are correct"""
    expected_headers = ["Chromosome", "Window", "Sample", "Value"]
    return list(df.columns) == expected_headers
def validate_signal_tracer_values(xlsx_df):
    """Return False if value column data are not int or float"""
    return xlsx_df['Value'].dtype != "object"
def validate_file_type(filename):
    """Return False if file type is not valid"""
    valid_filetypes = ['.tsv', '.csv', '.xlsx', '.txt']
    return Path(filename).suffix in valid_filetypes
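Taken together, the three validators make a natural pre-flight check for an uploaded SignalTracer file. A minimal sketch of that composition follows; pandas is an assumed dependency of the sketch only, since this module itself receives already-parsed dataframes:
import pandas as pd  # assumed dependency for this sketch only

def preflight(filename, df):
    """Run all three validators; True means the upload is usable."""
    return (validate_file_type(filename)
            and validate_signal_tracer_headers(df)
            and validate_signal_tracer_values(df))

df = pd.DataFrame({"Chromosome": ["chr1"], "Window": [0],
                   "Sample": ["s1"], "Value": [0.5]})
assert preflight("signals.tsv", df)      # well-formed input passes
assert not preflight("signals.pdf", df)  # unsupported extension fails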
| 25.925714
| 88
| 0.528543
| 930
| 9,074
| 4.973118
| 0.198925
| 0.029189
| 0.016865
| 0.024216
| 0.828757
| 0.823351
| 0.814703
| 0.814703
| 0.792216
| 0.776
| 0
| 0.013082
| 0.351333
| 9,074
| 349
| 89
| 26
| 0.772681
| 0.162222
| 0
| 0.739583
| 0
| 0
| 0.058011
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 1
| 0.024306
| false
| 0
| 0.027778
| 0
| 0.086806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bd08961b282ce3b26745bfd817a53d3ce607b1f
| 164
|
py
|
Python
|
swingtrader/stockmarketapi/__init__.py
|
kabylkas/swingtrader
|
8682e33464883f54b80f9764cfaf3cc9248774a0
|
[
"Apache-2.0"
] | null | null | null |
swingtrader/stockmarketapi/__init__.py
|
kabylkas/swingtrader
|
8682e33464883f54b80f9764cfaf3cc9248774a0
|
[
"Apache-2.0"
] | null | null | null |
swingtrader/stockmarketapi/__init__.py
|
kabylkas/swingtrader
|
8682e33464883f54b80f9764cfaf3cc9248774a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021-2022 Kabylkas Labs.
# Licensed under the Apache License, Version 2.0.
from .stockmarketapi import Stock
from .stockmarketapi import StockBucket
| 41
| 49
| 0.804878
| 22
| 164
| 6
| 0.863636
| 0.272727
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06993
| 0.128049
| 164
| 4
| 50
| 41
| 0.853147
| 0.52439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1bd5e7384dac3f4e1c314c62e84166a6ae616194
| 137
|
py
|
Python
|
lib/nginxlib.py
|
charmed-kubernetes/juju-layer-nginx
|
672d27695b512e50f51777b1eb63c5ff157b3d9e
|
[
"MIT"
] | 1
|
2015-11-04T03:40:24.000Z
|
2015-11-04T03:40:24.000Z
|
lib/nginxlib.py
|
charmed-kubernetes/juju-layer-nginx
|
672d27695b512e50f51777b1eb63c5ff157b3d9e
|
[
"MIT"
] | null | null | null |
lib/nginxlib.py
|
charmed-kubernetes/juju-layer-nginx
|
672d27695b512e50f51777b1eb63c5ff157b3d9e
|
[
"MIT"
] | null | null | null |
from warnings import warn
from charms.layer.nginx import * # noqa
warn('nginxlib is being deprecated, use charms.layer.nginx instead')
| 27.4
| 68
| 0.781022
| 20
| 137
| 5.35
| 0.7
| 0.205607
| 0.299065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138686
| 137
| 4
| 69
| 34.25
| 0.90678
| 0.029197
| 0
| 0
| 0
| 0
| 0.458015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
94248c4b47d73f1af610ca1e30110952cd6738d6
| 1,236
|
py
|
Python
|
filter_local_tool/test_filter.py
|
g-freire/web-parser-tools
|
edbec7b57b33eea8a203e1b32a8c911ef1a22956
|
[
"MIT"
] | 1
|
2019-09-25T21:22:14.000Z
|
2019-09-25T21:22:14.000Z
|
filter_local_tool/test_filter.py
|
g-freire/web-parser-tools
|
edbec7b57b33eea8a203e1b32a8c911ef1a22956
|
[
"MIT"
] | null | null | null |
filter_local_tool/test_filter.py
|
g-freire/web-parser-tools
|
edbec7b57b33eea8a203e1b32a8c911ef1a22956
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from filter_tool import *
# critical URLs that can break the algorithm
product_pattern = ["https://www.epocacosmeticos.com.br/some_product/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p"]
duplicates = ["https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p"]
notproduct = ["https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/pr","https://www.epocacosmeticos.com.br/p"]
noturl = ['epoca/a/p', 'www.epocacosmeticos.com.br/a/p', '/www.epocacosmeticos.com.br/p', ]
def test_find_pattern_product():
objeto = Mine('','')
assert objeto.find_pattern_product(product_pattern) == ('https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p','https://www.epocacosmeticos.com.br/some_product/p',)
assert objeto.find_pattern_product(duplicates) == ('https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p',)
assert objeto.find_pattern_product(notproduct) == ()
assert objeto.find_pattern_product(noturl) == ()
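The assertions above pin down the product-URL contract: a valid entry is a full www.epocacosmeticos.com.br URL whose path is one non-empty segment followed by /p, and duplicates collapse to a single result. A standalone regex sketch of that rule, as an illustration only (this is not the actual filter_tool implementation):
import re

# Illustrative pattern only; NOT Mine.find_pattern_product's real logic.
PRODUCT_RE = re.compile(r"^https?://www\.epocacosmeticos\.com\.br/[^/]+/p$")

def find_products(urls):
    # Deduplicate via a set, then return a deterministic tuple.
    return tuple(sorted({u for u in urls if PRODUCT_RE.match(u)}))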
| 56.181818
| 239
| 0.749191
| 171
| 1,236
| 5.321637
| 0.269006
| 0.237363
| 0.276923
| 0.303297
| 0.820879
| 0.721978
| 0.601099
| 0.57033
| 0.514286
| 0.514286
| 0
| 0.001741
| 0.070388
| 1,236
| 21
| 240
| 58.857143
| 0.790252
| 0.066343
| 0
| 0
| 0
| 0.583333
| 0.615318
| 0.051349
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9474293f5ec239c728c0ec0b3c31ea7b0eb35bf9
| 3,221
|
py
|
Python
|
vb2py/test_at_scale/testheinsega.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/test_at_scale/testheinsega.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/test_at_scale/testheinsega.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from vb2py.test_at_scale import file_tester
class Test_heinsega(file_tester.FileTester):
def test0(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Module1.bas')
def test1(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/start.frm')
def test2(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ShutDownWin.frm')
def test3(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/password_win.frm')
def test4(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_CookiesCtrl.bas')
def test5(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Parsing.bas')
def test6(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/BrowserW.frm')
def test7(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_manifest.bas')
def test8(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Declare_Function.bas')
def test9(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_function.bas')
def test10(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_FileSystem.bas')
def test11(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Transcoding.bas')
def test12(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/History_Logs.frm')
def test13(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/script_from.frm')
def test14(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/CMDresult.bas')
def test15(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/variable.bas')
def test16(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_MouseWheel.bas')
def test17(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_Finish_Download.frm')
def test18(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Ctrl8dot3name.frm')
def test19(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ComDialog.frm')
def test20(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/sys.frm')
def test21(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_163_Module.bas')
def test22(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX163_mainfrm.frm')
if __name__ == '__main__':
unittest.main()
| 40.2625
| 120
| 0.804409
| 449
| 3,221
| 5.556793
| 0.178174
| 0.073747
| 0.147495
| 0.193587
| 0.761523
| 0.761523
| 0.761523
| 0.761523
| 0.761523
| 0.761523
| 0
| 0.06807
| 0.055883
| 3,221
| 79
| 121
| 40.772152
| 0.752384
| 0
| 0
| 0
| 0
| 0.45098
| 0.665217
| 0.662733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.45098
| false
| 0.019608
| 0.039216
| 0
| 0.509804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 11
|
848ca2162eb67138e863cb310ce9975b6442a245
| 4,629
|
py
|
Python
|
customer_api/api/migrations/0008_auto_20171017_1504.py
|
t-yanaka/zabbix-report
|
ef471b60626dd5fef9bcaa74d6cbbc00cca10c9b
|
[
"MIT"
] | 2
|
2017-06-27T00:03:35.000Z
|
2020-09-16T11:47:53.000Z
|
customer_api/api/migrations/0008_auto_20171017_1504.py
|
t-yanaka/zabbix-report
|
ef471b60626dd5fef9bcaa74d6cbbc00cca10c9b
|
[
"MIT"
] | null | null | null |
customer_api/api/migrations/0008_auto_20171017_1504.py
|
t-yanaka/zabbix-report
|
ef471b60626dd5fef9bcaa74d6cbbc00cca10c9b
|
[
"MIT"
] | 3
|
2017-10-13T19:31:08.000Z
|
2020-09-16T11:47:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 06:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0007_auto_20171005_1713'),
]
operations = [
migrations.CreateModel(
name='Column',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('table_name', models.CharField(max_length=100)),
('column_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('name_id', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='No_Relation_Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
],
),
migrations.CreateModel(
name='No_Relation_Options',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grep_strings', models.CharField(max_length=100)),
('no_relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Columns')),
],
),
migrations.CreateModel(
name='No_Relation_Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
],
),
migrations.CreateModel(
name='Relation_Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
],
),
migrations.CreateModel(
name='Relation_Options',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condition', models.CharField(max_length=100)),
('relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Columns')),
],
),
migrations.CreateModel(
name='Relation_Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
],
),
migrations.CreateModel(
name='Tables',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('name_id', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='skill',
name='category',
),
migrations.DeleteModel(
name='Skill',
),
migrations.DeleteModel(
name='SkillCategory',
),
migrations.AddField(
model_name='relation_columns',
name='relation_table',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Table'),
),
migrations.AddField(
model_name='no_relation_columns',
name='no_relation_table',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Table'),
),
]
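Once the migration is applied, the new mapping tables behave like any other Django models. A hypothetical usage sketch; the api.models import path is inferred from the app label and not confirmed by the migration itself:
from api.models import Column, Columns  # assumed import path

col = Column.objects.create(table_name="hosts", column_name="hostid")
grp = Columns.objects.create(name="Hosts", name_id="hosts")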
| 42.46789
| 133
| 0.576366
| 455
| 4,629
| 5.676923
| 0.16044
| 0.037166
| 0.059621
| 0.09369
| 0.807588
| 0.759582
| 0.746032
| 0.746032
| 0.720093
| 0.720093
| 0
| 0.017231
| 0.285375
| 4,629
| 108
| 134
| 42.861111
| 0.763603
| 0.01469
| 0
| 0.653465
| 1
| 0
| 0.125932
| 0.014699
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029703
| 0
| 0.059406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
84e45d2b85d8499f1f5a378dd4dacfb53eabf9db
| 13,567
|
py
|
Python
|
plbenchmark/utils.py
|
kgoossens1/protein-ligand-benchmark
|
902753176bf17cb98735c50c6b9d72f559bba2c2
|
[
"CC-BY-4.0",
"MIT"
] | 13
|
2021-05-14T11:43:47.000Z
|
2022-02-03T20:03:13.000Z
|
plbenchmark/utils.py
|
AspirinCode/protein-ligand-benchmark
|
4e6f6801589d28c5ce21268124c7d506d9b6dde4
|
[
"CC-BY-4.0",
"MIT"
] | 14
|
2021-06-24T16:47:08.000Z
|
2022-03-15T09:57:25.000Z
|
plbenchmark/utils.py
|
AspirinCode/protein-ligand-benchmark
|
4e6f6801589d28c5ce21268124c7d506d9b6dde4
|
[
"CC-BY-4.0",
"MIT"
] | 2
|
2021-05-20T02:54:27.000Z
|
2021-09-29T00:14:26.000Z
|
"""
utils.py
Contains utility functions
"""
import numpy as np
from scipy import constants
import requests
from pint import UnitRegistry
import warnings
unit_registry = UnitRegistry()
boltzmann_constant = constants.gas_constant * unit_registry("J / mole / K")
def find_pdb_url(pdb):
"""
Finds the links to a pdb or a list of pdb codes.
:param pdb: string or list of strings
    :return: string, compiled string including the URLs to the PDB entries
"""
if pdb is None:
return ""
if type(pdb) == str:
pdb = [pdb]
result = []
for p in pdb:
url = f"https://data.rcsb.org/rest/v1/core/entry/{p}"
try:
response = requests.get(url)
if response.status_code == requests.codes.ok:
result.append(f"REP1http://www.rcsb.org/structure/{p}REP2{p}REP3")
else:
warnings.warn(f"Could not find PDB {p}")
result.append(p)
except requests.exceptions.RequestException as e:
warnings.warn(f"Could not find PDB {p}\n{e}")
result.append(p)
return ("\n").join(result)
def find_doi_url(doi):
"""
Finds the links to a digital object identifier (doi).
:param doi: string
    :return: string, compiled string including the URL to the publication
"""
url = "https://api.crossref.org/works/" + str(doi)
try:
response = requests.get(url)
    except requests.exceptions.RequestException as e:
        warnings.warn(f"Could not find DOI: {doi}\n{e}")
        return doi  # bail out early: `response` is unbound when the request fails
if response.status_code == requests.codes.ok:
obj = response.json()
obj = obj["message"]
aut = obj["author"]
if len(aut) > 0:
aut = obj["author"][0]["family"]
else:
aut = ""
tit = obj["short-container-title"]
if len(tit) > 0:
tit = tit[0]
else:
tit = ""
if "published-print" in obj.keys():
dat = obj["published-print"]["date-parts"][0][0]
else:
dat = "XXXX"
desc_string = "{} et al., {} {}".format(
aut, tit, dat
) # , obj['journal-issue']['published-online']['date-parts'][0][0])
return f'REP1{obj["URL"]}REP2{desc_string}REP3'
else:
warnings.warn(f"Could not find DOI: {doi}")
return doi
def convert_value(value, original_type, final_type, temperature=300.0, out_unit=None):
"""
Converts an experimental value into another derived quantity with specified unit.
:param value: float, numerical value
:param original_type: string, code for the original observable. Can be `dg`, `ki`, `ic50`, `pic50`
:param final_type: string, code for the desired derived quantity. Can be `dg`, `ki`, `ic50`, `pic50`
:param temperature: float, temperature in kelvin
:param out_unit: unit of type :py:class:`pint`, output unit of final_type, needs to fit to the requested final_type
:return: :py:class:`pint.Quantity` with desired unit
"""
# define default units
if out_unit is None:
if final_type == "dg":
out_unit = unit_registry("kilocalories / mole")
elif final_type == "ki":
out_unit = unit_registry("nanomolar")
elif final_type == "ic50":
out_unit = unit_registry("nanomolar")
elif final_type == "pic50":
out_unit = unit_registry("")
    if original_type == "dg":
        if final_type == "dg":
            return value.to(out_unit)
        elif final_type == "ki":
            # ki = exp(dg/RT) * M, the inverse of the ki -> dg branch below
            # (dg = RT*ln(ki/M)); the exponent must carry a positive sign so
            # that the two directions round-trip.
            result = (
                np.exp(
                    value / (boltzmann_constant * temperature * unit_registry.kelvin)
                )
                * unit_registry.molar
            )
            return result.to(out_unit)
        elif final_type == "ic50":
            result = (
                np.exp(
                    value / (boltzmann_constant * temperature * unit_registry.kelvin)
                )
                * unit_registry.molar
            )
            return result.to(out_unit)
        elif final_type == "pic50":
            # pic50 = -log10(ki) = -dg / (RT * ln(10))
            result = (
                -value
                / (boltzmann_constant * temperature * unit_registry.kelvin)
                / np.log(10)
            )
            return result.to(out_unit)
        else:
            raise NotImplementedError(
                f"Conversion to observable {final_type} not possible. "
                f"Observable must be any of: dg, ki, ic50 or pic50."
            )
elif original_type == "ki":
if final_type == "dg":
if value < 1e-15 * unit_registry("molar"):
return 0.0 * out_unit
else:
result = (
boltzmann_constant
* temperature
* unit_registry.kelvin
* np.log(value / unit_registry.molar)
)
return result.to(out_unit).round(2)
elif final_type == "ki":
return value.to(out_unit)
elif final_type == "ic50":
return value.to(out_unit)
elif final_type == "pic50":
if value < 1e-15 * unit_registry("molar"):
return -1e15 * out_unit
else:
result = -np.log(value / unit_registry.molar) / np.log(10)
return result
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
elif original_type == "ic50":
if final_type == "dg":
if value < 1e-15 * unit_registry("molar"):
return 0.0 * out_unit
else:
result = (
boltzmann_constant
* temperature
* unit_registry.kelvin
* np.log(value.to("molar") / unit_registry.molar)
)
return result.to(out_unit).round(2)
elif final_type == "ki":
return value.to(out_unit)
elif final_type == "ic50":
return value.to(out_unit)
elif final_type == "pic50":
if value.to("molar") < 1e-15 * unit_registry("molar"):
return -1e15 * out_unit
else:
result = -np.log(value / unit_registry.molar) / np.log(10)
return result
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
elif original_type == "pic50":
if final_type == "dg":
result = (
-boltzmann_constant
* temperature
* unit_registry.kelvin
* value
* np.log(10)
)
return result.to(out_unit).round(2)
elif final_type == "ki":
result = 10 ** (-value) * unit_registry("molar")
return result.to(out_unit)
elif final_type == "ic50":
result = 10 ** (-value) * unit_registry("molar")
return result.to(out_unit)
elif final_type == "pic50":
return value.to(out_unit)
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
def convert_error(
error_value, value, original_type, final_type, temperature=300.0, out_unit=None
):
"""
Converts an experimental value into another derived quantity with specified unit.
:param error_value: float, error of val, numerical value
:param value: float, numerical value
:param original_type: string, code for the original observable. Can be `dg`, `ki`, `ic50`, `pic50`
:param final_type: string, code for the desired derived quantity. Can be `dg`, `ki`, `ic50`, `pic50`
:param temperature: float, temperature in kelvin
:param out_unit: unit of type :py:class:`pint`, output unit of final_type, needs to fit to the requested final_type
:return: :py:class:`pint.Quantity` with desired unit
"""
# define default units
if out_unit is None:
if final_type == "dg":
out_unit = unit_registry("kilocalories / mole")
elif final_type == "ki":
out_unit = unit_registry("nanomolar")
elif final_type == "ic50":
out_unit = unit_registry("nanomolar")
elif final_type == "pic50":
out_unit = unit_registry("")
if original_type == "dg":
if final_type == "dg":
return error_value.to(out_unit)
elif final_type == "ki":
# e_ki^2 = (del K/del dG)^2 * e_dG^2
# e_ki = 1/RT * exp(-dG/RT) * e_dG
k_bt = boltzmann_constant * temperature * unit_registry.kelvin
error = (
1.0 / k_bt * np.exp(-value / k_bt) * error_value * unit_registry.molar
)
return error.to(out_unit)
elif final_type == "ic50":
k_bt = boltzmann_constant * temperature * unit_registry.kelvin
error = (
1.0 / k_bt * np.exp(-value / k_bt) * error_value * unit_registry.molar
)
return error.to(out_unit)
elif final_type == "pic50":
# e_pic50^2 = (del pic50/del dG)^2 * e_dG^2
# e_pic50 = 1/(RT*ln(10)) * e_dG
k_bt = boltzmann_constant * temperature * unit_registry.kelvin
error = 1.0 / (k_bt * np.log(10)) * error_value
return error.to(out_unit)
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
elif original_type == "ki":
if final_type == "dg":
if value < 1e-15 * unit_registry.molar:
return 0.0 * out_unit
else:
error = (
boltzmann_constant
* temperature
* unit_registry.kelvin
/ value
* error_value
)
return error.to(out_unit).round(2)
elif final_type == "ki":
return error_value.to(out_unit)
elif final_type == "ic50":
return error_value.to(out_unit)
elif final_type == "pic50":
# e_pic50^2 = (del pic50/del Ki)^2 * e_Ki^2
# e_pic50 = 1/(Ki*ln(10)) * e_Ki
if (value * np.log(10)) < 1e-15 * unit_registry("molar"):
return 1e15 * out_unit
else:
result = 1 / (value * np.log(10)) * error_value
return result.to(out_unit).round(2)
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
elif original_type == "ic50":
if final_type == "dg":
if value < 1e-15 * unit_registry.molar:
return 0.0 * out_unit
else:
error = (
boltzmann_constant
* temperature
* unit_registry.kelvin
/ value
* error_value
)
return error.to(out_unit).round(2)
elif final_type == "ki":
return error_value.to(out_unit)
elif final_type == "ic50":
return error_value.to(out_unit)
elif final_type == "pic50":
# e_pic50^2 = (del pic50/del IC50)^2 * e_IC50^2
# e_pic50 = 1/(IC50*ln(10)) * e_IC50
if (value * np.log(10)) < 1e-15 * unit_registry("molar"):
return 1e15 * out_unit
else:
result = 1 / (value * np.log(10)) * error_value
return result.to(out_unit).round(2)
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
elif original_type == "pic50":
if final_type == "dg":
error = (
boltzmann_constant
* temperature
* unit_registry.kelvin
* np.log(10)
* error_value
)
return error.to(out_unit).round(2)
elif final_type == "ki":
# Ki = 10^(-pIC50)
# dKi^2 = (del Ki / del pIC50)^2 * dpIC50^2
# dKi = ln(10) * 10^(-pIC50) * dpIC50
error = np.log(10) * 10 ** (-value) * error_value * unit_registry("molar")
return error.to(out_unit).round(2)
elif final_type == "ic50":
# IC50 = 10^(-pIC50)
# dIC50^2 = (del IC50 / del pIC50)^2 * dpIC50^2
# dIC50 = ln(10) * 10^(-pIC50) * dpIC50
error = np.log(10) * 10 ** (-value) * error_value * unit_registry("molar")
return error.to(out_unit).round(2)
elif final_type == "pic50":
return error_value.to(out_unit).round(2)
else:
raise NotImplementedError(
f"Conversion to observable {final_type} not possible. "
f"Observable must be any of: dg, ki, ic50 or pic50."
)
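As a quick sanity check on the conversions above: at the default 300 K, a 1 nM Ki gives dg = RT * ln(1e-9), about -12.35 kcal/mol, and a pIC50 of 9. A short sketch:
ki = 1.0 * unit_registry("nanomolar")
print(convert_value(ki, "ki", "dg"))     # ~ -12.35 kilocalorie / mole (rounded to 2 decimals)
print(convert_value(ki, "ki", "pic50"))  # -log10(1e-9) = 9.0, dimensionless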
| 37.169863
| 119
| 0.52613
| 1,585
| 13,567
| 4.359621
| 0.116719
| 0.072938
| 0.05644
| 0.059913
| 0.839074
| 0.82026
| 0.817945
| 0.79479
| 0.775109
| 0.742547
| 0
| 0.035843
| 0.366625
| 13,567
| 364
| 120
| 37.271978
| 0.7683
| 0.153166
| 0
| 0.731959
| 0
| 0
| 0.130722
| 0.005116
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013746
| false
| 0
| 0.020619
| 0
| 0.185567
| 0.006873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca018ee35ad72d433a92d4988857e5e6b1d41606
| 31,759
|
py
|
Python
|
models.py
|
TsukkiGia/pytrix
|
5477bd6a757d8ee3a16ca1fc5513b6d2926b6f66
|
[
"MIT"
] | null | null | null |
models.py
|
TsukkiGia/pytrix
|
5477bd6a757d8ee3a16ca1fc5513b6d2926b6f66
|
[
"MIT"
] | null | null | null |
models.py
|
TsukkiGia/pytrix
|
5477bd6a757d8ee3a16ca1fc5513b6d2926b6f66
|
[
"MIT"
] | null | null | null |
from consts import *
from game2d import *
class Block(GRectangle):
def __init__(self, x, y, width, height, fillcolor, linecolor, linewidth, angle=0):
super().__init__(x=x,
y=y,
width=width,
height=height,
fillcolor=fillcolor,
linecolor=linecolor,
linewidth=linewidth,
angle=angle)
self.visible = True
def __repr__(self):
return f"{self.bottom}"
class Piece(object):
def __init__(self, init_x, init_y):
self.current_x = init_x
self.current_y = init_y
def canDrop(self, done):
return all([block.bottom > 0 for block in self.blocks]) and not any([any([block.bottom == done_block.top and block.left == done_block.left and block.right == done_block.right for done_block in done]) for block in self.blocks])
def canMoveLeft(self, done):
return all([block.left > 0 for block in self.blocks]) and not any([any([block.left == done_block.right and block.top == done_block.top and block.bottom == done_block.bottom for done_block in done]) for block in self.blocks])
def canMoveRight(self, done):
return all([block.right < BOARD_WIDTH for block in self.blocks]) and not any([any([block.right == done_block.left and block.top == done_block.top and block.bottom == done_block.bottom for done_block in done]) for block in self.blocks])
def canRotate(self, done, tentative_blocks):
return all([block.bottom >= 0 and block.left >= 0 and block.right <= BOARD_WIDTH for block in tentative_blocks]) and not any([any([done_block.x == block.x and done_block.y == block.y for done_block in done]) for block in tentative_blocks])
def rotate(self):
if self.orientation == ORIENTATION.West:
self.blocks = self.get_next_orientation()
self.orientation = ORIENTATION.North
elif self.orientation == ORIENTATION.North:
self.blocks = self.get_next_orientation()
self.orientation = ORIENTATION.East
elif self.orientation == ORIENTATION.East:
self.blocks = self.get_next_orientation()
self.orientation = ORIENTATION.South
elif self.orientation == ORIENTATION.South:
self.blocks = self.get_next_orientation()
self.orientation = ORIENTATION.West
def __repr__(self):
return f"{self.__class__.__name__}: ({self.current_x}, {self.current_y}), {self.orientation}"
class OPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=init_x - BLOCK_LENGTH/2, y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='blue', linecolor='black', linewidth=2),
Block(x=init_x + BLOCK_LENGTH/2, y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='blue', linecolor='black', linewidth=2),
Block(x=init_x - BLOCK_LENGTH/2, y=init_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='blue', linecolor='black', linewidth=2),
Block(x=init_x + BLOCK_LENGTH/2, y=init_y - 3*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='blue', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
return self.blocks
class IPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=init_x + (3*BLOCK_LENGTH/2), y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=init_x + (BLOCK_LENGTH/2), y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=init_x - BLOCK_LENGTH/2, y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=init_x - 3*(BLOCK_LENGTH/2), y=init_y - BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x + (3*BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x + (BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - 3*(BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 7*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x + (3*BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x + (BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - 3*(BLOCK_LENGTH/2), y=self.current_y - BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 7*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='green', linecolor='black', linewidth=2)
]
class SPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=init_x + BLOCK_LENGTH/2, y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=init_x + BLOCK_LENGTH/2, y=init_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=init_x + 3*BLOCK_LENGTH/2, y=init_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=init_x - BLOCK_LENGTH/2, y=init_y - 3*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='red', linecolor='black', linewidth=2)
]
class LPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y - 5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y - BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='purple', linecolor='black', linewidth=2)
]
class JPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - (BLOCK_LENGTH/2), y=self.current_y+(BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - (BLOCK_LENGTH/2), y=self.current_y+(BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-5*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + 3*BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + (3*BLOCK_LENGTH/2), y=self.current_y-(3*BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x - BLOCK_LENGTH/2, y=self.current_y-5*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2),
Block(x=self.current_x + BLOCK_LENGTH/2, y=self.current_y-5*BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='gray', linecolor='black', linewidth=2)
]
class ZPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=init_x-BLOCK_LENGTH/2, y=init_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=init_x+BLOCK_LENGTH/2, y=init_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(init_x+BLOCK_LENGTH/2), y=init_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(init_x)+(3*BLOCK_LENGTH/2), y=init_y-(3*BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x+3*BLOCK_LENGTH/2, y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-(BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x-BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-(3*BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x+3*BLOCK_LENGTH/2, y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-(BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x-BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-(3*BLOCK_LENGTH/2),
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='brown', linecolor='black', linewidth=2)
]
class TPiece(Piece):
def __init__(self, init_x=BOARD_WIDTH/2, init_y=GAME_HEIGHT, orientation=ORIENTATION.North):
super().__init__(init_x, init_y)
self.orientation = orientation
self.blocks = [
Block(x=self.current_x-BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2)
]
def get_next_orientation(self):
if self.orientation == ORIENTATION.West:
return [
Block(x=self.current_x-BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.North:
return [
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.East:
return [
Block(x=self.current_x-BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x)+(3*BLOCK_LENGTH/2), y=self.current_y-BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2)
]
elif self.orientation == ORIENTATION.South:
return [
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y+BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=self.current_x+BLOCK_LENGTH/2, y=self.current_y-BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x+BLOCK_LENGTH/2), y=self.current_y-3*BLOCK_LENGTH/2, width=BLOCK_LENGTH,
height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2),
Block(x=(self.current_x)-(BLOCK_LENGTH/2), y=self.current_y-BLOCK_LENGTH/2,
width=BLOCK_LENGTH, height=BLOCK_LENGTH, fillcolor='yellow', linecolor='black', linewidth=2)
]
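# Editorial sketch (not part of the original file): every branch of
# get_next_orientation above repeats the same Block(...) call with different
# half-block offsets, so the layouts could be stored as data instead. The
# names here (BLOCK_LENGTH, S_OFFSETS, block_centres) are illustrative only;
# the offsets are transcribed from the SPiece West/North branches above.
BLOCK_LENGTH = 24  # assumed cell size; the real constant is defined elsewhere

# Offsets in units of BLOCK_LENGTH/2, keyed by the current orientation.
S_OFFSETS = {
    'West':  [(1, -1), (1, -3), (3, -1), (-1, -3)],
    'North': [(1, -1), (1, -3), (3, -3), (3, -5)],
}

def block_centres(x, y, offsets):
    """Return the centre coordinates of the four blocks for one orientation."""
    return [(x + dx * BLOCK_LENGTH / 2, y + dy * BLOCK_LENGTH / 2)
            for dx, dy in offsets]

if __name__ == '__main__':
    print(block_centres(120, 480, S_OFFSETS['North']))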
| 70.419069
| 247
| 0.63349
| 4,129
| 31,759
| 4.64931
| 0.018891
| 0.285357
| 0.155024
| 0.083971
| 0.968433
| 0.950826
| 0.946658
| 0.944054
| 0.942595
| 0.929208
| 0
| 0.019578
| 0.240877
| 31,759
| 450
| 248
| 70.575556
| 0.776681
| 0
| 0
| 0.789731
| 0
| 0.002445
| 0.041312
| 0.000819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056235
| false
| 0
| 0.007335
| 0.017115
| 0.149144
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ca0a25fdb72172c9aec5b78161fb201b327764cc
| 3,314
|
py
|
Python
|
cantina_band.py
|
agimpel/RPi-music
|
87519fb8014feb28328dc402faf2d757970b3e16
|
[
"MIT"
] | null | null | null |
cantina_band.py
|
agimpel/RPi-music
|
87519fb8014feb28328dc402faf2d757970b3e16
|
[
"MIT"
] | 1
|
2018-07-14T23:14:09.000Z
|
2018-07-14T23:14:09.000Z
|
cantina_band.py
|
agimpel/RPi-music
|
87519fb8014feb28328dc402faf2d757970b3e16
|
[
"MIT"
] | null | null | null |
from speaker import Speaker
import time
import RPi.GPIO as GPIO
speaker = Speaker(GPIO.BCM, 23)
speaker.set_bpm(260)
speaker.pause(2)
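# Editorial note (not part of the original file): the numbered comments below
# appear to mark measures (bars) of the tune; the second argument to play()
# looks like a note length as a fraction of a whole note, so 1/4 is a quarter
# note at the tempo set above.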
#1
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
#2
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.pause(1/8)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/4)
#3
speaker.play('A1', 1/8)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/8)
speaker.play('G1', 1/8)
speaker.pause(1/8)
speaker.play('F1X', 1/8)
speaker.play('G1', 1/4)
#4
speaker.play('F1', 3/8)
speaker.play('D1', 4/8)
speaker.pause(1/8)
#5
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
#6
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.pause(1/8)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/4)
#7
speaker.play('G1', 1/4)
speaker.play('G1', 1/4)
speaker.pause(1/8)
speaker.play('F1X', 1/8)
speaker.play('G1', 1/8)
#8
speaker.play('C2', 1/8)
speaker.play('A1X', 1/4)
speaker.play('A1', 2/8)
speaker.play('G1', 3/8)
#9
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
speaker.play('A1', 1/4)
speaker.play('D2', 1/4)
#10
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.pause(1/8)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/4)
#11
speaker.play('C2', 1/4)
speaker.play('C2', 3/8)
speaker.play('F1', 1/8)
speaker.play('G1', 1/4)
#12
speaker.play('F1', 3/8)
speaker.play('D1', 4/8)
speaker.pause(1/8)
#13
speaker.play('D1', 1/2)
speaker.play('F1', 1/2)
#14
speaker.play('A1', 1/2)
speaker.play('C2', 1/2)
#15-16
speaker.play('D2X', 1/4)
speaker.play('D2', 1/4)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/4)
speaker.play('F1', 1/4)
speaker.pause(7/8)
#17
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#18
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#19-20
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('C2X', 1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 4/8)
speaker.play('F1', 5/8)
#21
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#22
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#23-24
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('C2X', 1/8)
speaker.play('D2', 1/4)
speaker.play('C2', 4/8)
speaker.play('E1', 5/8)
#25
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#26
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('D2', 1/4)
speaker.pause(1/4)
#27-28
speaker.pause(1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 1/8)
speaker.play('C2X', 1/8)
speaker.play('D2', 1/4)
speaker.play('A1', 4/8)
speaker.play('F1', 5/8)
#29
speaker.pause(1/4)
speaker.play('A1X', 1/4)
speaker.pause(1/4)
speaker.play('B1', 1/4)
#30
speaker.play('C2X', 1/8)
speaker.play('D2', 3/8)
speaker.play('F1', 5/8)
#31-32
speaker.play('D1', 1/8)
speaker.play('F1', 1/8)
speaker.play('A1X', 1/8)
speaker.play('D2', 1/8)
speaker.play('G1X', 1/8)
speaker.play('A1', 1/4)
speaker.play('F1', 5/8)
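# Editorial sketch (not part of the original file): the speaker module used
# above is not included in this dump. Below is one plausible implementation
# of the interface the score exercises -- Speaker(mode, pin), set_bpm, play,
# pause -- driving a passive buzzer with software PWM. The note naming
# (letter, octave digit, trailing 'X' read as "sharp") and the absolute
# pitch anchoring are assumptions, not taken from the original module.
import time
import RPi.GPIO as GPIO

_SEMITONE = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}

class Speaker:
    def __init__(self, mode, pin):
        GPIO.setmode(mode)
        GPIO.setup(pin, GPIO.OUT)
        self._pwm = GPIO.PWM(pin, 440)
        self._pwm.start(0)             # 0% duty cycle: silent until play()
        self._whole = 4 * 60.0 / 120   # seconds per whole note; reset by set_bpm

    def set_bpm(self, bpm):
        # Treat one beat as a quarter note, so a whole note lasts four beats.
        self._whole = 4 * 60.0 / bpm

    def _frequency(self, note):
        # 'G1X' -> G sharp, octave digit 1. Equal temperament relative to
        # 'A1', which is anchored at 440 Hz here (the octave is a guess).
        sharp = 1 if note.endswith('X') else 0
        letter, octave = note[0], int(note[1])
        steps = 12 * (octave - 1) + _SEMITONE[letter] - _SEMITONE['A'] + sharp
        return 440.0 * 2.0 ** (steps / 12.0)

    def play(self, note, length):
        self._pwm.ChangeFrequency(self._frequency(note))
        self._pwm.ChangeDutyCycle(50)        # audible square wave
        time.sleep(length * self._whole)     # length = fraction of a whole note
        self._pwm.ChangeDutyCycle(0)         # gap between notes

    def pause(self, length):
        time.sleep(length * self._whole)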
| 16.908163
| 31
| 0.638805
| 685
| 3,314
| 3.089051
| 0.084672
| 0.55104
| 0.311909
| 0.282609
| 0.844991
| 0.832231
| 0.764178
| 0.748582
| 0.708412
| 0.700378
| 0
| 0.142857
| 0.093844
| 3,314
| 195
| 32
| 16.994872
| 0.561772
| 0.018105
| 0
| 0.817518
| 0
| 0
| 0.070654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021898
| 0
| 0.021898
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ca2a020e55ef58b8ccd08ea58dfee06400f98911
| 34,753
|
py
|
Python
|
mpisppy/utils/pysp_model/tests/test_scenariotree.py
|
Matthew-Signorotti/mpi-sppy
|
5c6b4b8cd26af517ff09706d11751f2fb05b1b5f
|
[
"BSD-3-Clause"
] | 2
|
2020-06-05T14:31:46.000Z
|
2020-09-29T20:08:05.000Z
|
mpisppy/utils/pysp_model/tests/test_scenariotree.py
|
Matthew-Signorotti/mpi-sppy
|
5c6b4b8cd26af517ff09706d11751f2fb05b1b5f
|
[
"BSD-3-Clause"
] | 22
|
2020-06-06T19:30:33.000Z
|
2020-10-30T23:00:58.000Z
|
mpisppy/utils/pysp_model/tests/test_scenariotree.py
|
Matthew-Signorotti/mpi-sppy
|
5c6b4b8cd26af517ff09706d11751f2fb05b1b5f
|
[
"BSD-3-Clause"
] | 6
|
2020-06-06T17:57:38.000Z
|
2020-09-18T22:38:19.000Z
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# This file was originally part of PySP and Pyomo, available: https://github.com/Pyomo/pysp
# Copied with modification from pysp/tests/unit/test_scenariotree.py
import pyomo.common.unittest as unittest
from mpisppy.utils.pysp_model.tree_structure_model import \
(ScenarioTreeModelFromNetworkX,
CreateConcreteTwoStageScenarioTreeModel)
from mpisppy.utils.pysp_model.tree_structure import ScenarioTree
from mpisppy.utils.pysp_model.pysp_model import _get_nonant_list, _get_derived_nonant_list
from pyomo.core import (ConcreteModel,
Set,
Var,
Expression,
Objective,
Block,
value)
from pyomo.common.dependencies import (
networkx, networkx_available as has_networkx
)
def _get_names(iterable):
return [_.name for _ in iterable]
class TestScenarioTree(unittest.TestCase):
def _get_block_model(self):
model = ConcreteModel()
model.s = Set(initialize=[1,2])
b = Block(concrete=True)
b.s = Set(initialize=[1,2])
b.x = Var()
b.X = Var(model.s)
model.b1 = b.clone()
model.b2 = b.clone()
model.b3 = b.clone()
model.b4 = b.clone()
model.B1 = Block(model.s, rule=lambda _,i: b.clone())
model.B2 = Block(model.s, rule=lambda _,i: b.clone())
model.B3 = Block(model.s, rule=lambda _,i: b.clone())
model.B4 = Block(model.s, rule=lambda _,i: b.clone())
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
return model
def test_indexedblock_noindextemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("B1")
st_model.StageDerivedVariables['Stage1'].add("B2")
st_model.NodeVariables['RootNode'].add("B3")
st_model.NodeDerivedVariables['RootNode'].add("B4")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = self._get_block_model()
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 12
assert len(root_derived_nonant_names) == 12
for name in (
"B1[1].x", "B1[2].x",
"B3[1].x", "B3[2].x",
):
assert name in root_nonant_names
for name in (
"B1[1].X", "B1[2].X",
"B3[1].X", "B3[2].X",
):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_nonant_names
for name in (
"B2[1].x", "B2[2].x",
"B4[1].x", "B4[2].x",
):
assert name in root_derived_nonant_names
for name in (
"B2[1].X", "B2[2].X",
"B4[1].X", "B4[2].X",
):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_derived_nonant_names
def test_indexedblock_wildcardtemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("B1[*]")
st_model.StageDerivedVariables['Stage1'].add("B2[*]")
st_model.NodeVariables['RootNode'].add("B3[*]")
st_model.NodeDerivedVariables['RootNode'].add("B4[*]")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = self._get_block_model()
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 12
assert len(root_derived_nonant_names) == 12
for name in (
"B1[1].x", "B1[2].x",
"B3[1].x", "B3[2].x",
):
assert name in root_nonant_names
for name in (
"B1[1].X", "B1[2].X",
"B3[1].X", "B3[2].X",
):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_nonant_names
for name in (
"B2[1].x", "B2[2].x",
"B4[1].x", "B4[2].x",
):
assert name in root_derived_nonant_names
for name in (
"B2[1].X", "B2[2].X",
"B4[1].X", "B4[2].X",
):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_derived_nonant_names
def test_singletonblock_wildcardtemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("b1[*]")
st_model.StageDerivedVariables['Stage1'].add("b2[*]")
st_model.NodeVariables['RootNode'].add("b3[*]")
st_model.NodeDerivedVariables['RootNode'].add("b4[*]")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = self._get_block_model()
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 6
assert len(root_derived_nonant_names) == 6
for name in ("b1.x", "b3.x"):
assert name in root_nonant_names
for name in ("b1.X", "b3.X"):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_nonant_names
for name in ("b2.x", "b4.x"):
assert name in root_derived_nonant_names
for name in ("b2.X", "b4.X"):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_derived_nonant_names
def test_singletonblock_noindextemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("b1")
st_model.StageDerivedVariables['Stage1'].add("b2")
st_model.NodeVariables['RootNode'].add("b3")
st_model.NodeDerivedVariables['RootNode'].add("b4")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = self._get_block_model()
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 6
assert len(root_derived_nonant_names) == 6
for name in ("b1.x", "b3.x"):
assert name in root_nonant_names
for name in ("b1.X", "b3.X"):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_nonant_names
for name in ("b2.x", "b4.x"):
assert name in root_derived_nonant_names
for name in ("b2.X", "b4.X"):
var = model.find_component(name)
for vardata in var.values():
assert vardata.name in root_derived_nonant_names
def test_singletonvar_noindextemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("x")
st_model.StageDerivedVariables['Stage1'].add("y")
st_model.NodeVariables['RootNode'].add("z")
st_model.NodeDerivedVariables['RootNode'].add("q")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = ConcreteModel()
model.x = Var()
model.y = Var()
model.z = Var()
model.q = Var()
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 2
assert len(root_derived_nonant_names) == 2
for name in ("x", "z"):
assert name in root_nonant_names
for name in ("y", "q"):
assert name in root_derived_nonant_names
def test_singletonvar_wildcardtemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("x[*]")
st_model.StageDerivedVariables['Stage1'].add("y[*]")
st_model.NodeVariables['RootNode'].add("z[*]")
st_model.NodeDerivedVariables['RootNode'].add("q[*]")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = ConcreteModel()
model.x = Var()
model.y = Var()
model.z = Var()
model.q = Var()
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 2
assert len(root_derived_nonant_names) == 2
for name in ("x", "z"):
assert name in root_nonant_names
for name in ("y", "q"):
assert name in root_derived_nonant_names
def test_multiindexedvar_singlewildcardtemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("x[*,* ]")
st_model.StageDerivedVariables['Stage1'].add("y[ *,*]")
st_model.NodeVariables['RootNode'].add("z[*,*]")
st_model.NodeDerivedVariables['RootNode'].add("q[ * , * ]")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = ConcreteModel()
model.s = Set(initialize=[(1,'a'),(2,'b'),(3,'c')])
model.x = Var(model.s)
model.y = Var(model.s)
model.z = Var(model.s)
model.q = Var(model.s)
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 6
assert len(root_derived_nonant_names) == 6
for name in ("x", "z"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_nonant_names
for name in ("y", "q"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_derived_nonant_names
def test_indexedvar_indextemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("x[*]")
st_model.StageDerivedVariables['Stage1'].add("y[*]")
st_model.NodeVariables['RootNode'].add("z[*]")
st_model.NodeDerivedVariables['RootNode'].add("q[*]")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = ConcreteModel()
model.s = Set(initialize=[1,2,3])
model.x = Var(model.s)
model.y = Var(model.s)
model.z = Var(model.s)
model.q = Var(model.s)
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 6
assert len(root_derived_nonant_names) == 6
for name in ("x", "z"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_nonant_names
for name in ("y", "q"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_derived_nonant_names
def test_indexedvar_noindextemplate(self):
st_model = CreateConcreteTwoStageScenarioTreeModel(1)
st_model.StageVariables['Stage1'].add("x")
st_model.StageDerivedVariables['Stage1'].add("y")
st_model.NodeVariables['RootNode'].add("z")
st_model.NodeDerivedVariables['RootNode'].add("q")
st_model.StageCost['Stage1'] = "FirstStageCost"
st_model.StageCost['Stage2'] = "SecondStageCost"
scenario_tree = ScenarioTree(scenariotreeinstance=st_model)
self.assertEqual(len(scenario_tree.stages), 2)
self.assertEqual(len(scenario_tree.nodes), 2)
self.assertEqual(len(scenario_tree.scenarios), 1)
model = ConcreteModel()
model.s = Set(initialize=[1,2,3])
model.x = Var(model.s)
model.y = Var(model.s)
model.z = Var(model.s)
model.q = Var(model.s)
model.FirstStageCost = Expression(expr=0.0)
model.SecondStageCost = Expression(expr=0.0)
model.obj = Objective(expr=0.0)
root = scenario_tree.findRootNode()
root_nonant_names = _get_names(_get_nonant_list(model, root))
root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root))
assert len(root_nonant_names) == 6
assert len(root_derived_nonant_names) == 6
for name in ("x", "z"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_nonant_names
for name in ("y", "q"):
indexed_var = model.find_component(name)
for index in model.s:
var = indexed_var[index]
assert var.name in root_derived_nonant_names
@unittest.skipIf(not has_networkx, "Requires networkx module")
class TestScenarioTreeFromNetworkX(unittest.TestCase):
def test_empty(self):
G = networkx.DiGraph()
with self.assertRaises(networkx.NetworkXPointlessConcept):
ScenarioTreeModelFromNetworkX(G)
def test_not_tree(self):
G = networkx.DiGraph()
G.add_node("1")
G.add_node("2")
G.add_edge("1", "2")
G.add_edge("2", "1")
with self.assertRaises(TypeError):
ScenarioTreeModelFromNetworkX(G)
def test_not_directed(self):
G = networkx.Graph()
G.add_node("1")
G.add_node("2")
G.add_edge("1", "2")
with self.assertRaises(TypeError):
ScenarioTreeModelFromNetworkX(G)
def test_not_branching(self):
G = networkx.DiGraph()
G.add_node("1")
G.add_node("2")
G.add_node("R")
G.add_edge("1", "R")
G.add_edge("2", "R")
with self.assertRaises(TypeError):
ScenarioTreeModelFromNetworkX(G)
def test_not_enough_stages(self):
G = networkx.DiGraph()
G.add_node("R")
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(G)
def test_missing_node_name(self):
G = networkx.DiGraph()
G.add_node("R", name="Root")
G.add_node("C")
G.add_edge("R", "C", weight=1)
with self.assertRaises(KeyError):
ScenarioTreeModelFromNetworkX(
G,
node_name_attribute="name")
def test_missing_scenario_name(self):
G = networkx.DiGraph()
G.add_node("R", name="Root")
G.add_node("C")
G.add_edge("R", "C", weight=1)
with self.assertRaises(KeyError):
ScenarioTreeModelFromNetworkX(
G,
scenario_name_attribute="name")
def test_missing_weight(self):
G = networkx.DiGraph()
G.add_node("R", name="Root")
G.add_node("C", name="Child")
G.add_edge("R", "C")
with self.assertRaises(KeyError):
ScenarioTreeModelFromNetworkX(G)
def test_bad_weight1(self):
G = networkx.DiGraph()
G.add_node("R",)
G.add_node("C",)
G.add_edge("R", "C",weight=0.8)
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(G)
def test_bad_weight2(self):
G = networkx.DiGraph()
G.add_node("R")
G.add_node("C1")
G.add_edge("R", "C1", weight=0.8)
G.add_node("C2")
G.add_edge("R", "C2", weight=0.1)
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(G)
def test_bad_custom_stage_names1(self):
G = networkx.DiGraph()
G.add_node("R",)
G.add_node("C1")
G.add_edge("R", "C1", weight=1.0)
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(
G, stage_names=["Stage1"])
def test_bad_custom_stage_names2(self):
G = networkx.DiGraph()
G.add_node("R")
G.add_node("C1")
G.add_edge("R", "C1", weight=1.0)
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(
G, stage_names=["Stage1","Stage1"])
def test_two_stage(self):
G = networkx.DiGraph()
G.add_node("Root")
G.add_node("Child1")
G.add_edge("Root", "Child1", weight=0.8)
G.add_node("Child2")
G.add_edge("Root", "Child2", weight=0.2)
model = ScenarioTreeModelFromNetworkX(G)
self.assertEqual(
sorted(list(model.Stages)),
sorted(["Stage1", "Stage2"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted(["Root", "Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Root"])),
sorted(["Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Child1"])),
sorted([]))
self.assertEqual(
sorted(list(model.Children["Child2"])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted(["Child1", "Child2"]))
self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0)
self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8)
self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2)
model.StageCost["Stage1"] = "c1"
model.StageCost["Stage2"] = "c2"
model.StageVariables["Stage1"].add("x")
self.assertEqual(model.Bundling.value, False)
self.assertEqual(list(model.Bundles), [])
self.assertEqual(len(model.BundleScenarios), 0)
ScenarioTree(scenariotreeinstance=model)
def test_two_stage_more_node_attributes(self):
G = networkx.DiGraph()
G.add_node("Root",
cost="c1",
variables=["x"],
derived_variables=["y"])
G.add_node("Child1",
cost="c2",
variables=["q"],
derived_variables=["z"])
G.add_edge("Root", "Child1", weight=0.8)
G.add_node("Child2",
cost="c2",
variables=["q"],
derived_variables=["z"])
G.add_edge("Root", "Child2", weight=0.2)
model = ScenarioTreeModelFromNetworkX(G)
self.assertEqual(
sorted(list(model.Stages)),
sorted(["Stage1", "Stage2"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted(["Root", "Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Root"])),
sorted(["Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Child1"])),
sorted([]))
self.assertEqual(
sorted(list(model.Children["Child2"])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted(["Child1", "Child2"]))
self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0)
self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8)
self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2)
self.assertEqual(model.StageCost["Stage1"].value, None)
self.assertEqual(list(model.StageVariables["Stage1"]), [])
self.assertEqual(list(model.StageDerivedVariables["Stage1"]), [])
self.assertEqual(model.NodeCost["Root"].value, "c1")
self.assertEqual(list(model.NodeVariables["Root"]), ["x"])
self.assertEqual(list(model.NodeDerivedVariables["Root"]), ["y"])
self.assertEqual(model.StageCost["Stage2"].value, None)
self.assertEqual(list(model.StageVariables["Stage2"]), [])
self.assertEqual(list(model.StageDerivedVariables["Stage2"]), [])
self.assertEqual(model.NodeCost["Child1"].value, "c2")
self.assertEqual(list(model.NodeVariables["Child1"]), ["q"])
self.assertEqual(list(model.NodeDerivedVariables["Child1"]), ["z"])
self.assertEqual(model.NodeCost["Child2"].value, "c2")
self.assertEqual(list(model.NodeVariables["Child2"]), ["q"])
self.assertEqual(list(model.NodeDerivedVariables["Child2"]), ["z"])
self.assertEqual(model.Bundling.value, False)
self.assertEqual(list(model.Bundles), [])
self.assertEqual(len(model.BundleScenarios), 0)
ScenarioTree(scenariotreeinstance=model)
def test_two_stage_custom_names(self):
G = networkx.DiGraph()
G.add_node("R", label="Root")
G.add_node("C1", label="Child1", scenario="S1")
G.add_edge("R", "C1", probability=0.8)
G.add_node("C2", label="Child2", scenario="S2")
G.add_edge("R", "C2", probability=0.2)
model = ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute="probability",
node_name_attribute="label",
stage_names=["T1","T2"],
scenario_name_attribute="scenario")
self.assertEqual(
sorted(list(model.Stages)),
sorted(["T1", "T2"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted(["Root", "Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Root"])),
sorted(["Child1", "Child2"]))
self.assertEqual(
sorted(list(model.Children["Child1"])),
sorted([]))
self.assertEqual(
sorted(list(model.Children["Child2"])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted(["S1", "S2"]))
self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0)
self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8)
self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2)
model.StageCost["T1"] = "c1"
model.StageCost["T2"] = "c2"
model.StageVariables["T1"].add("x")
self.assertEqual(model.Bundling.value, False)
self.assertEqual(list(model.Bundles), [])
self.assertEqual(len(model.BundleScenarios), 0)
ScenarioTree(scenariotreeinstance=model)
def test_multi_stage(self):
G = networkx.balanced_tree(3,2,networkx.DiGraph())
model = ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
self.assertEqual(
sorted(list(model.Stages)),
sorted(["Stage1", "Stage2", "Stage3"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))
self.assertEqual(
sorted(list(model.Children[0])),
sorted([1,2,3]))
self.assertEqual(
sorted(list(model.Children[1])),
sorted([4,5,6]))
self.assertEqual(
sorted(list(model.Children[2])),
sorted([7,8,9]))
self.assertEqual(
sorted(list(model.Children[3])),
sorted([10,11,12]))
self.assertEqual(
sorted(list(model.Children[4])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[5])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[6])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[7])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[8])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[9])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[10])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[11])),
sorted([]))
self.assertEqual(
sorted(list(model.Children[12])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted([4, 5, 6, 7, 8, 9, 10, 11, 12]))
self.assertEqual(value(model.ConditionalProbability[0]), 1.0)
self.assertAlmostEqual(value(model.ConditionalProbability[1]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[2]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[3]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[4]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[5]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[6]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[7]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[8]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[9]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[10]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[11]), 1.0/3)
self.assertAlmostEqual(value(model.ConditionalProbability[12]), 1.0/3)
model.StageCost["Stage1"] = "c1"
model.StageCost["Stage2"] = "c2"
model.StageCost["Stage3"] = "c3"
model.StageVariables["Stage1"].add("x")
model.StageVariables["Stage2"].add("y")
model.StageVariables["Stage3"].add("y")
self.assertEqual(model.Bundling.value, False)
self.assertEqual(list(model.Bundles), [])
self.assertEqual(len(model.BundleScenarios), 0)
ScenarioTree(scenariotreeinstance=model)
def test_unbalanced(self):
G = networkx.DiGraph()
G.add_node("R")
G.add_node("0")
G.add_node("1")
G.add_edge("R", "0")
G.add_edge("R", "1")
G.add_node("00")
G.add_node("01")
G.add_edge("0", "00")
G.add_edge("0", "01")
model = ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
self.assertEqual(
sorted(list(model.Stages)),
sorted(["Stage1", "Stage2", "Stage3"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted(["R","0","1","00","01"]))
self.assertEqual(
sorted(list(model.Children["R"])),
sorted(["0", "1"]))
self.assertEqual(
sorted(list(model.Children["0"])),
sorted(["00","01"]))
self.assertEqual(
sorted(list(model.Children["1"])),
sorted([]))
self.assertEqual(
sorted(list(model.Children["00"])),
sorted([]))
self.assertEqual(
sorted(list(model.Children["01"])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted(["00", "01", "1"]))
self.assertEqual(value(model.ConditionalProbability["R"]), 1.0)
self.assertEqual(value(model.ConditionalProbability["0"]), 0.5)
self.assertEqual(value(model.ConditionalProbability["1"]), 0.5)
self.assertEqual(value(model.ConditionalProbability["00"]), 0.5)
self.assertEqual(value(model.ConditionalProbability["01"]), 0.5)
model.StageCost["Stage1"] = "c1"
model.StageCost["Stage2"] = "c2"
model.StageCost["Stage3"] = "c3"
model.StageVariables["Stage1"].add("x")
model.StageVariables["Stage2"].add("x")
self.assertEqual(model.Bundling.value, False)
self.assertEqual(list(model.Bundles), [])
self.assertEqual(len(model.BundleScenarios), 0)
ScenarioTree(scenariotreeinstance=model)
def test_bundles(self):
G = networkx.DiGraph()
G.add_node("r")
for i in range(4):
G.add_node("u"+str(i), bundle=i%2)
G.add_edge("r", "u"+str(i))
model = ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
self.assertEqual(
sorted(list(model.Stages)),
sorted(["Stage1", "Stage2"]))
self.assertEqual(
sorted(list(model.Nodes)),
sorted(["r", "u0", "u1", "u2", "u3"]))
self.assertEqual(
sorted(list(model.Children["r"])),
sorted(["u0", "u1", "u2", "u3"]))
for i in range(4):
self.assertEqual(
sorted(list(model.Children["u"+str(i)])),
sorted([]))
self.assertEqual(
sorted(list(model.Scenarios)),
sorted(["u0", "u1", "u2", "u3"]))
self.assertEqual(value(model.ConditionalProbability["r"]), 1.0)
for i in range(4):
self.assertEqual(value(model.ConditionalProbability["u"+str(i)]),
0.25)
self.assertEqual(model.Bundling.value, True)
self.assertEqual(list(model.Bundles), [0, 1])
for k, bundle_name in enumerate(model.Bundles):
self.assertEqual(list(model.BundleScenarios[bundle_name]),
["u"+str(i) for i in range(4)
if i%2 == k])
model.StageCost["Stage1"] = "c1"
model.StageCost["Stage2"] = "c2"
model.StageVariables["Stage1"].add("x")
ScenarioTree(scenariotreeinstance=model)
def test_bundles_incomplete(self):
G = networkx.DiGraph()
G.add_node("r")
for i in range(4):
G.add_node("u"+str(i), bundle="B")
G.add_edge("r", "u"+str(i))
model = ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
self.assertEqual(model.Bundling.value, True)
self.assertEqual(list(model.Bundles), ["B"])
self.assertEqual(list(model.BundleScenarios["B"]),
["u"+str(i) for i in range(4)])
G.nodes["u0"]["bundle"] = None
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
del G.nodes["u0"]["bundle"]
with self.assertRaises(ValueError):
ScenarioTreeModelFromNetworkX(
G,
edge_probability_attribute=None)
if __name__ == "__main__":
unittest.main()
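# Editorial sketch (not part of the original file): the smallest end-to-end
# use of the API exercised above -- a two-stage tree built from a networkx
# digraph, mirroring test_two_stage. Edge weights are conditional
# probabilities and must sum to 1 across each node's children.
import networkx
from mpisppy.utils.pysp_model.tree_structure_model import ScenarioTreeModelFromNetworkX
from mpisppy.utils.pysp_model.tree_structure import ScenarioTree

G = networkx.DiGraph()
G.add_node("Root")
G.add_node("Low")
G.add_node("High")
G.add_edge("Root", "Low", weight=0.5)
G.add_edge("Root", "High", weight=0.5)

model = ScenarioTreeModelFromNetworkX(G)
model.StageCost["Stage1"] = "FirstStageCost"   # names of cost Expressions
model.StageCost["Stage2"] = "SecondStageCost"  # on the scenario models
model.StageVariables["Stage1"].add("x")        # first-stage decision variable
tree = ScenarioTree(scenariotreeinstance=model)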
| 39.808706
| 91
| 0.594798
| 3,857
| 34,753
| 5.162821
| 0.061187
| 0.095666
| 0.049566
| 0.059007
| 0.890474
| 0.864912
| 0.837744
| 0.78843
| 0.714257
| 0.685884
| 0
| 0.025352
| 0.26907
| 34,753
| 872
| 92
| 39.854358
| 0.758562
| 0.019193
| 0
| 0.718016
| 0
| 0
| 0.057525
| 0
| 0
| 0
| 0
| 0
| 0.25718
| 1
| 0.039164
| false
| 0
| 0.007833
| 0.001305
| 0.052219
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca8cb01a572d2050f72204fbb3bd1ee5653e56e5
| 261
|
py
|
Python
|
contrib/report_builders/__init__.py
|
berndonline/flan
|
3d851d9aa0b73d4e32d8f311e2ddefafa15648a2
|
[
"BSD-3-Clause"
] | 3,711
|
2019-11-20T23:58:42.000Z
|
2022-03-27T18:43:51.000Z
|
contrib/report_builders/__init__.py
|
berndonline/flan
|
3d851d9aa0b73d4e32d8f311e2ddefafa15648a2
|
[
"BSD-3-Clause"
] | 56
|
2019-11-21T19:21:23.000Z
|
2022-03-20T19:46:22.000Z
|
contrib/report_builders/__init__.py
|
berndonline/flan
|
3d851d9aa0b73d4e32d8f311e2ddefafa15648a2
|
[
"BSD-3-Clause"
] | 295
|
2019-11-21T15:54:26.000Z
|
2022-03-24T15:18:12.000Z
|
from .report_builder import ReportBuilder
from .latex_report_builder import LatexReportBuilder
from .markdown_report_builder import MarkdownReportBuilder
from .json_report_builder import JsonReportBuilder
from .html_report_builder import JinjaHtmlReportBuilder
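# Editorial note (not part of the original file): these re-exports let
# callers import any builder from the package root, e.g.
#   from contrib.report_builders import MarkdownReportBuilder
# instead of reaching into the individual submodules.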
| 43.5
| 58
| 0.904215
| 29
| 261
| 7.827586
| 0.448276
| 0.286344
| 0.418502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076628
| 261
| 5
| 59
| 52.2
| 0.941909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
046058a96701e019af377663b6bbf0d5f9acfc02
| 168
|
py
|
Python
|
app/views/index.py
|
Depado/LostInNetworkWeb
|
d3963819aa6f770d56836de6ef82f469cfebe900
|
[
"MIT"
] | 1
|
2015-11-08T10:13:19.000Z
|
2015-11-08T10:13:19.000Z
|
app/views/index.py
|
Depado/LostInNetworkWeb
|
d3963819aa6f770d56836de6ef82f469cfebe900
|
[
"MIT"
] | null | null | null |
app/views/index.py
|
Depado/LostInNetworkWeb
|
d3963819aa6f770d56836de6ef82f469cfebe900
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import render_template
from app import app
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
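# Editorial note (not part of the original file): `app` is imported from the
# application package, so the Flask instance is presumably created in
# app/__init__.py (not shown in this dump). With that layout the view can be
# served locally with, e.g.:
#   FLASK_APP=app flask run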
| 16.8
| 40
| 0.666667
| 23
| 168
| 4.782609
| 0.695652
| 0.254545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.154762
| 168
| 9
| 41
| 18.666667
| 0.767606
| 0.125
| 0
| 0
| 0
| 0
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
04f7aa61b6e08ff8640b93f52c8fec939d235137
| 75,909
|
py
|
Python
|
backend/web_app/tests/snapshots/snap_test_statistics_report.py
|
jsc-masshtab/vdi-server
|
3de49dec986ab26ffc6c073873fb9de5943809f9
|
[
"MIT"
] | 2
|
2021-12-03T10:04:25.000Z
|
2022-01-12T06:26:39.000Z
|
backend/web_app/tests/snapshots/snap_test_statistics_report.py
|
jsc-masshtab/vdi-server
|
3de49dec986ab26ffc6c073873fb9de5943809f9
|
[
"MIT"
] | null | null | null |
backend/web_app/tests/snapshots/snap_test_statistics_report.py
|
jsc-masshtab/vdi-server
|
3de49dec986ab26ffc6c073873fb9de5943809f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_web_statistics_report 1'] = {
'web_statistics_report': '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="ru">
<head>
<meta name="generator" content="AWStats 7.5 (build 20160301) from config file awstats.vdi.conf (http://www.awstats.org)">
<meta name="robots" content="noindex,nofollow">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta http-equiv="description" content="Awstats - Advanced Web Statistics for vdi (2021-10) - main">
<title>Статистика за vdi (2021-10) - main</title>
<style type="text/css">
body { font: 11px verdana, arial, helvetica, sans-serif; background-color: #FFFFFF; margin-top: 0; margin-bottom: 0; }
.aws_bodyl { }
.aws_border { border-collapse: collapse; background-color: #CCCCDD; padding: 1px 1px 1px 1px; margin-top: 0px; margin-bottom: 0px; }
.aws_title { font: 13px verdana, arial, helvetica, sans-serif; font-weight: bold; background-color: #CCCCDD; text-align: center; margin-top: 0; margin-bottom: 0; padding: 1px 1px 1px 1px; color: #000000; }
.aws_blank { font: 13px verdana, arial, helvetica, sans-serif; background-color: #FFFFFF; text-align: center; margin-bottom: 0; padding: 1px 1px 1px 1px; }
.aws_data {
\tbackground-color: #FFFFFF;
\tborder-top-width: 1px;
\tborder-left-width: 0px;
\tborder-right-width: 0px;
\tborder-bottom-width: 0px;
}
.aws_formfield { font: 13px verdana, arial, helvetica; }
.aws_button {
\tfont-family: arial,verdana,helvetica, sans-serif;
\tfont-size: 12px;
\tborder: 1px solid #ccd7e0;
\tbackground-image : url(/awstatsicons/other/button.gif);
}
th\t\t{ border-color: #ECECEC; border-left-width: 0px; border-right-width: 1px; border-top-width: 0px; border-bottom-width: 1px; padding: 1px 2px 1px 1px; font: 11px verdana, arial, helvetica, sans-serif; text-align:center; color: #000000; }
th.aws\t{ border-color: #ECECEC; border-left-width: 0px; border-right-width: 1px; border-top-width: 0px; border-bottom-width: 1px; padding: 1px 2px 1px 1px; font-size: 13px; font-weight: bold; }
td\t\t{ border-color: #ECECEC; border-left-width: 0px; border-right-width: 1px; border-top-width: 0px; border-bottom-width: 1px; font: 11px verdana, arial, helvetica, sans-serif; text-align:center; color: #000000; }
td.aws\t{ border-color: #ECECEC; border-left-width: 0px; border-right-width: 1px; border-top-width: 0px; border-bottom-width: 1px; font: 11px verdana, arial, helvetica, sans-serif; text-align:left; color: #000000; padding: 0px;}
td.awsm\t{ border-left-width: 0px; border-right-width: 0px; border-top-width: 0px; border-bottom-width: 0px; font: 11px verdana, arial, helvetica, sans-serif; text-align:left; color: #000000; padding: 0px; }
b { font-weight: bold; }
a { font: 11px verdana, arial, helvetica, sans-serif; }
a:link { color: #0011BB; text-decoration: none; }
a:visited { color: #0011BB; text-decoration: none; }
a:hover { color: #605040; text-decoration: underline; }
.currentday { font-weight: bold; }
</style>
</head>
<body style="margin-top: 0px">
<a name="top"></a>
<a name="menu"> </a>
<form name="FormDateFilter" action="/awstats/awstats.pl?config=vdi&update" style="padding: 0px 0px 20px 0px; margin-top: 0">
<table class="aws_border" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td>
<table class="aws_data sortable" border="0" cellpadding="1" cellspacing="0" width="100%">
<tr><td class="aws" valign="middle"><b>Статистика за:</b> </td><td class="aws" valign="middle"><span style="font-size: 14px;">vdi</span></td><td align="right" rowspan="3"><a href="http://www.awstats.org" target="awstatshome"><img src="/awstatsicons/other/awstats_logo6.png" border="0" alt=Awstats Web Site title=Awstats Web Site /></a></td></tr>
<tr valign="middle"><td class="aws" valign="middle" width="150"><b>Последнее обновление:</b> </td><td class="aws" valign="middle"><span style="font-size: 12px;">27 Окт 2021 - 09:44</span></td></tr>
<tr><td class="aws" valign="middle"><b>Отчетный период:</b></td><td class="aws" valign="middle"><span style="font-size: 14px;">Месяц Окт 2021</span></td></tr>
</table>
</td></tr></table>
</form><br />
<table>
<tr><td class="awsm" width="150" valign="top"><b>Когда:</b></td>
<td class="awsm"><a href="#month">История за месяц</a> <a href="#daysofmonth">День месяца</a> <a href="#daysofweek">День недели</a> <a href="#hours">Часы</a> </td></tr>
<tr><td class="awsm" width="150" valign="top"><b>Кто:</b></td>
<td class="awsm"><a href="#countries">Страны</a> <a href="awstats.vdi.alldomains.html" target="awstatsbis">Полный список</a>
<a href="#visitors">Хосты</a> <a href="awstats.vdi.allhosts.html" target="awstatsbis">Полный список</a>
<a href="awstats.vdi.lasthosts.html" target="awstatsbis">Последний визит</a>
<a href="awstats.vdi.unknownip.html" target="awstatsbis">Неразрешенный IP адрес</a>
<a href="#robots">Роботы/Пауки посетители</a> <a href="awstats.vdi.allrobots.html" target="awstatsbis">Полный список</a>
<a href="awstats.vdi.lastrobots.html" target="awstatsbis">Последний визит</a>
</td></tr>
<tr><td class="awsm" valign="top"><b>Навигация:</b></td>
<td class="awsm"><a href="#sessions">Продолжительность визитов</a> <a href="#filetypes">Тип файла</a> <a href="#downloads">Downloads</a> <a href="awstats.vdi.downloads.html" target="awstatsbis">Полный список</a>
<a href="#urls">Просмотров</a>
<a href="awstats.vdi.urldetail.html" target="awstatsbis">Полный список</a>
<a href="awstats.vdi.urlentry.html" target="awstatsbis">Вхождение</a>
<a href="awstats.vdi.urlexit.html" target="awstatsbis">Выход</a>
<a href="#os">Операционные системы</a> <a href="awstats.vdi.osdetail.html" target="awstatsbis">Версии</a>
<a href="awstats.vdi.unknownos.html" target="awstatsbis">Неизвестный</a>
<a href="#browsers">Браузеры</a> <a href="awstats.vdi.browserdetail.html" target="awstatsbis">Версии</a>
<a href="awstats.vdi.unknownbrowser.html" target="awstatsbis">Неизвестный</a>
</td></tr>
<tr><td class="awsm" width="150" valign="top"><b>Рефереры:</b></td>
<td class="awsm"><a href="#referer">Происхождение</a> <a href="awstats.vdi.refererse.html" target="awstatsbis">Ссылающиеся поисковые машины</a>
<a href="awstats.vdi.refererpages.html" target="awstatsbis">Ссылающиеся сайты</a>
<a href="#keys">Поиск</a> <a href="awstats.vdi.keyphrases.html" target="awstatsbis">Поисковые Ключевые фразы</a>
<a href="awstats.vdi.keywords.html" target="awstatsbis">Поисковые Ключевые слова</a>
</td></tr>
<tr><td class="awsm" width="150" valign="top"><b>Остальные:</b></td>
<td class="awsm"><a href="#misc">Смешанные</a> <a href="#errors">Статусы ошибок HTTP</a> <a href="awstats.vdi.errors400.html" target="awstatsbis">Ошибка Хиты (400)</a>
<a href="awstats.vdi.errors403.html" target="awstatsbis">Ошибка Хиты (403)</a>
<a href="awstats.vdi.errors404.html" target="awstatsbis">Ошибка Хиты (404)</a>
</td></tr>
</table>
<br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Общее </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><td class="aws"><b>Отчетный период</b></td><td class="aws" colspan="5">
Месяц Окт 2021</td></tr>
<tr bgcolor="#ECECEC"><td class="aws"><b>Первый визит</b></td>
<td class="aws" colspan="5">26 Окт 2021 - 10:29</td></tr>
<tr bgcolor="#ECECEC"><td class="aws"><b>Последний визит</b></td>
<td class="aws" colspan="5">27 Окт 2021 - 09:43</td>
</tr>
<tr><td bgcolor="#CCCCDD"> </td><td width="17%" bgcolor="#FFAA66">Уникальные посетители</td><td width="17%" bgcolor="#F4F090">Количество визитов</td><td width="17%" bgcolor="#4477DD">Страницы</td><td width="17%" bgcolor="#66DDEE">Хиты</td><td width="17%" bgcolor="#2EA495">Объем</td></tr>
<tr><td class="aws">Отображаемый трафик *</td><td><b>2</b><br /> </td><td><b>6</b><br />(3 Визитов/Посетитель)</td><td><b>660</b><br />(110 Страницы/Визит)</td><td><b>1,010</b><br />(168.33 Хиты/Визит)</td><td><b>7.42 МБ</b><br />(1266.11 КБ/Визит)</td></tr>
<tr><td class="aws">Не отображаемый трафик *</td><td colspan="2"> <br /> </td>
<td><b>2</b></td><td><b>11</b></td><td><b>16.85 КБ</b></td></tr>
</table></td></tr></table><span style="font: 11px verdana, arial, helvetica;">* Не отображаемый трафик влючает в себя трафик сгенерированный роботами, вирусами или ответом сервера со специальным HTTP кодом.</span><br />
<br />
<a name="month"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">История за месяц </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr><td align="center">
<center>
<table>
<tr valign="bottom"><td> </td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="31" width="6" alt=Уникальные посетители: 2 title=Уникальные посетители: 2 /><img align="bottom" src="/awstatsicons/other/vv.png" height="91" width="6" alt=Количество визитов: 6 title=Количество визитов: 6 /><img align="bottom" src="/awstatsicons/other/vp.png" height="59" width="6" alt=Страницы: 660 title=Страницы: 660 /><img align="bottom" src="/awstatsicons/other/vh.png" height="91" width="6" alt=Хиты: 1010 title=Хиты: 1010 /><img align="bottom" src="/awstatsicons/other/vk.png" height="91" width="6" alt=Объем: 7.42 МБ title=Объем: 7.42 МБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vu.png" height="1" width="6" alt=Уникальные посетители: 0 title=Уникальные посетители: 0 /><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="6" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td> </td></tr>
<tr valign="middle"><td> </td><td>Янв<br />2021</td><td>Фев<br />2021</td><td>Мар<br />2021</td><td>Апр<br />2021</td><td>Май<br />2021</td><td>Июн<br />2021</td><td>Июл<br />2021</td><td>Авг<br />2021</td><td>Сен<br />2021</td><td>Окт<br />2021</td><td>Ноя<br />2021</td><td>Дек<br />2021</td><td> </td></tr>
</table>
<br />
<table>
<tr><td width="80" bgcolor="#ECECEC">Месяц</td><td width="80" bgcolor="#FFAA66">Уникальные посетители</td><td width="80" bgcolor="#F4F090">Количество визитов</td><td width="80" bgcolor="#4477DD">Страницы</td><td width="80" bgcolor="#66DDEE">Хиты</td><td width="80" bgcolor="#2EA495">Объем</td></tr>
<tr><td>Янв 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Фев 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Мар 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Апр 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Май 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Июн 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Июл 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Авг 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Сен 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Окт 2021</td><td>2</td><td>6</td><td>660</td><td>1,010</td><td>7.42 МБ</td></tr>
<tr><td>Ноя 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Дек 2021</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td bgcolor="#ECECEC">Всего</td><td bgcolor="#ECECEC">2</td><td bgcolor="#ECECEC">6</td><td bgcolor="#ECECEC">660</td><td bgcolor="#ECECEC">1,010</td><td bgcolor="#ECECEC">7.42 МБ</td></tr>
</table>
<br />
</center>
</td></tr>
</table></td></tr></table><br />
<a name="when"> </a>
<a name="daysofmonth"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">День месяца </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr><td align="center">
<center>
<table>
<tr valign="bottom">
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="91" width="4" alt=Количество визитов: 5 title=Количество визитов: 5 /><img align="bottom" src="/awstatsicons/other/vp.png" height="62" width="4" alt=Страницы: 567 title=Страницы: 567 /><img align="bottom" src="/awstatsicons/other/vh.png" height="91" width="4" alt=Хиты: 835 title=Хиты: 835 /><img align="bottom" src="/awstatsicons/other/vk.png" height="91" width="4" alt=Объем: 6.56 МБ title=Объем: 6.56 МБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="19" width="4" alt=Количество визитов: 1 title=Количество визитов: 1 /><img align="bottom" src="/awstatsicons/other/vp.png" height="11" width="4" alt=Страницы: 93 title=Страницы: 93 /><img align="bottom" src="/awstatsicons/other/vh.png" height="19" width="4" alt=Хиты: 175 title=Хиты: 175 /><img align="bottom" src="/awstatsicons/other/vk.png" height="12" width="4" alt=Объем: 878.40 КБ title=Объем: 878.40 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vv.png" height="1" width="4" alt=Количество визитов: 0 title=Количество визитов: 0 /><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="4" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="4" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="4" alt=Объем: 0 title=Объем: 0 /></td>
<td> </td><td><img align="bottom" src="/awstatsicons/other/vv.png" height="4" width="4" alt=Количество визитов: 0.22 title=Количество визитов: 0.22 /><img align="bottom" src="/awstatsicons/other/vp.png" height="3" width="4" alt=Страницы: 24.44 title=Страницы: 24.44 /><img align="bottom" src="/awstatsicons/other/vh.png" height="5" width="4" alt=Хиты: 37.41 title=Хиты: 37.41 /><img align="bottom" src="/awstatsicons/other/vk.png" height="4" width="4" alt=Объем: 288111.78 title=Объем: 288111.78 /></td>
</tr>
<tr valign="middle"><td>01<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">02<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">03<br /><span style="font-size: 9px;">Окт</span></td>
<td>04<br /><span style="font-size: 9px;">Окт</span></td>
<td>05<br /><span style="font-size: 9px;">Окт</span></td>
<td>06<br /><span style="font-size: 9px;">Окт</span></td>
<td>07<br /><span style="font-size: 9px;">Окт</span></td>
<td>08<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">09<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">10<br /><span style="font-size: 9px;">Окт</span></td>
<td>11<br /><span style="font-size: 9px;">Окт</span></td>
<td>12<br /><span style="font-size: 9px;">Окт</span></td>
<td>13<br /><span style="font-size: 9px;">Окт</span></td>
<td>14<br /><span style="font-size: 9px;">Окт</span></td>
<td>15<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">16<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">17<br /><span style="font-size: 9px;">Окт</span></td>
<td>18<br /><span style="font-size: 9px;">Окт</span></td>
<td>19<br /><span style="font-size: 9px;">Окт</span></td>
<td>20<br /><span style="font-size: 9px;">Окт</span></td>
<td>21<br /><span style="font-size: 9px;">Окт</span></td>
<td>22<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">23<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">24<br /><span style="font-size: 9px;">Окт</span></td>
<td>25<br /><span style="font-size: 9px;">Окт</span></td>
<td>26<br /><span style="font-size: 9px;">Окт</span></td>
<td>27<br /><span style="font-size: 9px;">Окт</span></td>
<td>28<br /><span style="font-size: 9px;">Окт</span></td>
<td>29<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">30<br /><span style="font-size: 9px;">Окт</span></td>
<td bgcolor="#EAEAEA">31<br /><span style="font-size: 9px;">Окт</span></td>
<td> </td><td valign="middle">Среднее</td>
</tr>
</table>
<br />
<table>
<tr><td width="80" bgcolor="#ECECEC">День</td><td width="80" bgcolor="#F4F090">Количество визитов</td><td width="80" bgcolor="#4477DD">Страницы</td><td width="80" bgcolor="#66DDEE">Хиты</td><td width="80" bgcolor="#2EA495">Объем</td></tr><tr><td>01 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>02 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>03 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>04 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>05 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>06 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>07 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>08 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>09 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>10 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>11 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>12 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>13 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>14 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>15 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>16 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>17 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>18 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>19 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>20 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>21 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>22 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>23 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>24 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>25 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>26 Окт 2021</td><td>5</td><td>567</td><td>835</td><td>6.56 МБ</td></tr>
<tr><td>27 Окт 2021</td><td>1</td><td>93</td><td>175</td><td>878.40 КБ</td></tr>
<tr><td>28 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>29 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>30 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>31 Окт 2021</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#ECECEC"><td>Среднее</td><td>0</td><td>24</td><td>37</td><td>281.36 КБ</td></tr>
<tr bgcolor="#ECECEC"><td>Всего</td><td>6</td><td>660</td><td>1,010</td><td>7.42 МБ</td></tr>
</table>
<br /></center>
</td></tr>
</table></td></tr></table><br />
<a name="daysofweek"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">День недели </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr><td align="center"><center>
<table>
<tr valign="bottom">
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="62" width="6" alt=Страницы: 141.75 title=Страницы: 141.75 /><img align="bottom" src="/awstatsicons/other/vh.png" height="91" width="6" alt=Хиты: 208.75 title=Хиты: 208.75 /><img align="bottom" src="/awstatsicons/other/vk.png" height="91" width="6" alt=Объем: 1.64 МБ title=Объем: 1.64 МБ /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="11" width="6" alt=Страницы: 23.25 title=Страницы: 23.25 /><img align="bottom" src="/awstatsicons/other/vh.png" height="19" width="6" alt=Хиты: 43.75 title=Хиты: 43.75 /><img align="bottom" src="/awstatsicons/other/vk.png" height="12" width="6" alt=Объем: 219.60 КБ title=Объем: 219.60 КБ /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td valign="bottom"><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
</tr>
<tr>
<td>Пнд</td><td>Втр</td><td>Срд</td><td>Чтв</td><td>Птн</td><td bgcolor="#EAEAEA">Сбт</td><td bgcolor="#EAEAEA">Вск</td></tr>
</table>
<br />
<table>
<tr><td width="80" bgcolor="#ECECEC">День</td><td width="80" bgcolor="#4477DD">Страницы</td><td width="80" bgcolor="#66DDEE">Хиты</td><td width="80" bgcolor="#2EA495">Объем</td></tr><tr><td>Пнд</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Втр</td><td>141</td><td>208</td><td>1.64 МБ</td></tr>
<tr><td>Срд</td><td>23</td><td>43</td><td>219.60 КБ</td></tr>
<tr><td>Чтв</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>Птн</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>Сбт</td><td>0</td><td>0</td><td>0</td></tr>
<tr bgcolor="#EAEAEA"><td>Вск</td><td>0</td><td>0</td><td>0</td></tr>
</table>
<br />
</center></td></tr>
</table></td></tr></table><br />
<a name="hours"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Часы </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr><td align="center">
<center>
<table>
<tr valign="bottom">
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="28" width="6" alt=Страницы: 93 title=Страницы: 93 /><img align="bottom" src="/awstatsicons/other/vh.png" height="52" width="6" alt=Хиты: 175 title=Хиты: 175 /><img align="bottom" src="/awstatsicons/other/vk.png" height="25" width="6" alt=Объем: 878.40 КБ title=Объем: 878.40 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="9" width="6" alt=Страницы: 28 title=Страницы: 28 /><img align="bottom" src="/awstatsicons/other/vh.png" height="11" width="6" alt=Хиты: 34 title=Хиты: 34 /><img align="bottom" src="/awstatsicons/other/vk.png" height="16" width="6" alt=Объем: 566.86 КБ title=Объем: 566.86 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="3" width="6" alt=Страницы: 10 title=Страницы: 10 /><img align="bottom" src="/awstatsicons/other/vh.png" height="7" width="6" alt=Хиты: 22 title=Хиты: 22 /><img align="bottom" src="/awstatsicons/other/vk.png" height="31" width="6" alt=Объем: 1.06 МБ title=Объем: 1.06 МБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="15" width="6" alt=Страницы: 50 title=Страницы: 50 /><img align="bottom" src="/awstatsicons/other/vh.png" height="18" width="6" alt=Хиты: 59 title=Хиты: 59 /><img align="bottom" src="/awstatsicons/other/vk.png" height="18" width="6" alt=Объем: 623.53 КБ title=Объем: 623.53 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="46" width="6" alt=Страницы: 155 title=Страницы: 155 /><img align="bottom" src="/awstatsicons/other/vh.png" height="91" width="6" alt=Хиты: 305 title=Хиты: 305 /><img align="bottom" src="/awstatsicons/other/vk.png" height="91" width="6" alt=Объем: 3.15 МБ title=Объем: 3.15 МБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="71" width="6" alt=Страницы: 239 title=Страницы: 239 /><img align="bottom" src="/awstatsicons/other/vh.png" height="73" width="6" alt=Хиты: 247 title=Хиты: 247 /><img align="bottom" src="/awstatsicons/other/vk.png" height="21" width="6" alt=Объем: 751.30 КБ title=Объем: 751.30 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="8" width="6" alt=Страницы: 25 title=Страницы: 25 /><img align="bottom" src="/awstatsicons/other/vh.png" height="8" width="6" alt=Хиты: 26 title=Хиты: 26 /><img align="bottom" src="/awstatsicons/other/vk.png" height="4" width="6" alt=Объем: 109.92 КБ title=Объем: 109.92 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="18" width="6" alt=Страницы: 60 title=Страницы: 60 /><img align="bottom" src="/awstatsicons/other/vh.png" height="42" width="6" alt=Хиты: 142 title=Хиты: 142 /><img align="bottom" src="/awstatsicons/other/vk.png" height="10" width="6" alt=Объем: 356.04 КБ title=Объем: 356.04 КБ /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
<td><img align="bottom" src="/awstatsicons/other/vp.png" height="1" width="6" alt=Страницы: 0 title=Страницы: 0 /><img align="bottom" src="/awstatsicons/other/vh.png" height="1" width="6" alt=Хиты: 0 title=Хиты: 0 /><img align="bottom" src="/awstatsicons/other/vk.png" height="1" width="6" alt=Объем: 0 title=Объем: 0 /></td>
</tr>
<tr><th width="19">0</th>
<th width="19">1</th>
<th width="19">2</th>
<th width="19">3</th>
<th width="19">4</th>
<th width="19">5</th>
<th width="19">6</th>
<th width="19">7</th>
<th width="19">8</th>
<th width="19">9</th>
<th width="19">10</th>
<th width="19">11</th>
<th width="19">12</th>
<th width="19">13</th>
<th width="19">14</th>
<th width="19">15</th>
<th width="19">16</th>
<th width="19">17</th>
<th width="19">18</th>
<th width="19">19</th>
<th width="19">20</th>
<th width="19">21</th>
<th width="19">22</th>
<th width="19">23</th>
</tr>
<tr>
<td><img src="/awstatsicons/clock/hr1.png" width="12" alt="0:00 - 1:00 am" /></td>
<td><img src="/awstatsicons/clock/hr2.png" width="12" alt="1:00 - 2:00 am" /></td>
<td><img src="/awstatsicons/clock/hr3.png" width="12" alt="2:00 - 3:00 am" /></td>
<td><img src="/awstatsicons/clock/hr4.png" width="12" alt="3:00 - 4:00 am" /></td>
<td><img src="/awstatsicons/clock/hr5.png" width="12" alt="4:00 - 5:00 am" /></td>
<td><img src="/awstatsicons/clock/hr6.png" width="12" alt="5:00 - 6:00 am" /></td>
<td><img src="/awstatsicons/clock/hr7.png" width="12" alt="6:00 - 7:00 am" /></td>
<td><img src="/awstatsicons/clock/hr8.png" width="12" alt="7:00 - 8:00 am" /></td>
<td><img src="/awstatsicons/clock/hr9.png" width="12" alt="8:00 - 9:00 am" /></td>
<td><img src="/awstatsicons/clock/hr10.png" width="12" alt="9:00 - 10:00 am" /></td>
<td><img src="/awstatsicons/clock/hr11.png" width="12" alt="10:00 - 11:00 am" /></td>
<td><img src="/awstatsicons/clock/hr12.png" width="12" alt="11:00 - 12:00 am" /></td>
<td><img src="/awstatsicons/clock/hr1.png" width="12" alt="0:00 - 1:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr2.png" width="12" alt="1:00 - 2:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr3.png" width="12" alt="2:00 - 3:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr4.png" width="12" alt="3:00 - 4:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr5.png" width="12" alt="4:00 - 5:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr6.png" width="12" alt="5:00 - 6:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr7.png" width="12" alt="6:00 - 7:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr8.png" width="12" alt="7:00 - 8:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr9.png" width="12" alt="8:00 - 9:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr10.png" width="12" alt="9:00 - 10:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr11.png" width="12" alt="10:00 - 11:00 pm" /></td>
<td><img src="/awstatsicons/clock/hr12.png" width="12" alt="11:00 - 12:00 pm" /></td>
</tr>
</table>
<br />
<table width="650"><tr>
<td align="center"><center>
<table>
<tr><td width="80" bgcolor="#ECECEC">Часы</td><td width="80" bgcolor="#4477DD">Страницы</td><td width="80" bgcolor="#66DDEE">Хиты</td><td width="80" bgcolor="#2EA495">Объем</td></tr><tr><td>00</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>01</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>02</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>03</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>04</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>05</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>06</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>07</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>08</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>09</td><td>93</td><td>175</td><td>878.40 КБ</td></tr>
<tr><td>10</td><td>28</td><td>34</td><td>566.86 КБ</td></tr>
<tr><td>11</td><td>0</td><td>0</td><td>0</td></tr>
</table>
</center></td><td width="10"> </td><td align="center"><center>
<table>
<tr><td width="80" bgcolor="#ECECEC">Часы</td><td width="80" bgcolor="#4477DD">Страницы</td><td width="80" bgcolor="#66DDEE">Хиты</td><td width="80" bgcolor="#2EA495">Объем</td></tr>
<tr><td>12</td><td>10</td><td>22</td><td>1.06 МБ</td></tr>
<tr><td>13</td><td>50</td><td>59</td><td>623.53 КБ</td></tr>
<tr><td>14</td><td>155</td><td>305</td><td>3.15 МБ</td></tr>
<tr><td>15</td><td>239</td><td>247</td><td>751.30 КБ</td></tr>
<tr><td>16</td><td>25</td><td>26</td><td>109.92 КБ</td></tr>
<tr><td>17</td><td>60</td><td>142</td><td>356.04 КБ</td></tr>
<tr><td>18</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>19</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>20</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>21</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>22</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>23</td><td>0</td><td>0</td><td>0</td></tr>
</table>
</center></td></tr></table>
<br />
</center></td></tr>
</table></td></tr></table><br />
<a name="who"> </a>
<a name="countries"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Посетители домены/страны (Топ 10) - <a href="awstats.vdi.alldomains.html" target="awstatsbis">Полный список</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th width="32"> </th><th colspan="2">Домены/Страны</th><th bgcolor="#4477DD" width="80">Страницы</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#2EA495" width="80">Объем</th><th> </th></tr>
<tr><td width="32"><img src="/awstatsicons/flags/ip.png" height="14" alt=Неизвестный title=Неизвестный /></td><td class="aws">Неизвестный</td><td>ip</td><td>660</td><td>1,010</td><td>7.42 МБ</td><td class="aws"><img src="/awstatsicons/other/hp.png" width="170" height="5" alt= title= /><br />
<img src="/awstatsicons/other/hh.png" width="261" height="5" alt= title= /><br />
<img src="/awstatsicons/other/hk.png" width="261" height="5" alt= title= /></td></tr>
<tr><td width="32"> </td><td colspan="2" class="aws"><span style="color: #666688">Остальные</span></td><td>0</td><td>0</td><td>0</td><td class="aws"> </td></tr>
</table></td></tr></table><br />
<a name="visitors"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Хосты (Топ 10) - <a href="awstats.vdi.allhosts.html" target="awstatsbis">Полный список</a> - <a href="awstats.vdi.lasthosts.html" target="awstatsbis">Последний визит</a> - <a href="awstats.vdi.unknownip.html" target="awstatsbis">Неразрешенный IP адрес</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>Хосты : 0 Известные, 2 Неизвестный<br />2 Уникальные посетители</th><th bgcolor="#4477DD" width="80">Страницы</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#2EA495" width="80">Объем</th><th width="120">Последний визит</th></tr>
<tr><td class="aws">192.168.14.211</td><td>646</td><td>952</td><td>5.68 МБ</td><td nowrap="nowrap">27 Окт 2021 - 09:43</td></tr>
<tr><td class="aws">127.0.0.1</td><td>14</td><td>58</td><td>1.74 МБ</td><td nowrap="nowrap">26 Окт 2021 - 14:34</td></tr>
</table></td></tr></table><br />
<a name="robots"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Роботы/Пауки посетители (Топ 10) - <a href="awstats.vdi.allrobots.html" target="awstatsbis">Полный список</a> - <a href="awstats.vdi.lastrobots.html" target="awstatsbis">Последний визит</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>0 различные роботы*</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#2EA495" width="80">Объем</th><th width="120">Последний визит</th></tr>
</table></td></tr></table><span style="font: 11px verdana, arial, helvetica;">* Роботы отображенные здесь генерируют трафик не отображаемый посетителям, поэтому они не включены в остальную статистику.</span><br />
<br />
<a name="how"> </a>
<a name="sessions"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Продолжительность визитов </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>Количество визитов: 6 - Среднее: 861 s</th><th bgcolor="#8888DD" width="80">Количество визитов</th><th bgcolor="#8888DD" width="80">Процент</th></tr>
<tr><td class="aws">0s-30s</td><td>2</td><td>33.3 %</td></tr>
<tr><td class="aws">30s-2mn</td><td>1</td><td>16.6 %</td></tr>
<tr><td class="aws">2mn-5mn</td><td> </td><td> </td></tr>
<tr><td class="aws">5mn-15mn</td><td>1</td><td>16.6 %</td></tr>
<tr><td class="aws">15mn-30mn</td><td> </td><td> </td></tr>
<tr><td class="aws">30mn-1h</td><td> </td><td> </td></tr>
<tr><td class="aws">1h+</td><td>1</td><td>16.6 %</td></tr>
<tr><td class="aws"><span style="color: #666688">Неизвестный</span></td><td>1</td><td>16.6 %</td></tr>
</table></td></tr></table><br />
<a name="filetypes"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Тип файла </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th colspan="3">Тип файла</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">Процент</th><th bgcolor="#2EA495" width="80">Объем</th><th bgcolor="#2EA495" width="80">Процент</th></tr>
<tr><td width="32"><img src="/awstatsicons/mime/pl.png" alt= title= /></td><td class="aws">pl</td><td class="aws">Dynamic Perl Script file</td><td>431</td><td>42.6 %</td><td nowrap="nowrap">3.72 МБ</td><td>50 %</td></tr>
<tr><td><img src="/awstatsicons/mime/image.png" alt= title= /></td><td class="aws">png</td><td class="aws">Image</td><td>315</td><td>31.1 %</td><td nowrap="nowrap">454.79 КБ</td><td>5.9 %</td></tr>
<tr><td><img src="/awstatsicons/mime/unknown.png" alt= title= /></td><td class="aws" colspan="2"><span style="color: #666688">Неизвестный</span></td><td>172</td><td>17 %</td><td nowrap="nowrap">199.14 КБ</td><td>2.6 %</td></tr>
<tr><td><img src="/awstatsicons/mime/php.png" alt= title= /></td><td class="aws">php</td><td class="aws">Dynamic PHP Script file</td><td>57</td><td>5.6 %</td><td nowrap="nowrap">106.16 КБ</td><td>1.3 %</td></tr>
<tr><td><img src="/awstatsicons/mime/jscript.png" alt= title= /></td><td class="aws">js</td><td class="aws">JavaScript file</td><td>21</td><td>2 %</td><td nowrap="nowrap">2.90 МБ</td><td>39 %</td></tr>
<tr><td><img src="/awstatsicons/mime/image.png" alt= title= /></td><td class="aws">gif</td><td class="aws">Image</td><td>8</td><td>0.7 %</td><td nowrap="nowrap">3.94 КБ</td><td>0 %</td></tr>
<tr><td><img src="/awstatsicons/mime/css.png" alt= title= /></td><td class="aws">css</td><td class="aws">Cascading Style Sheet file</td><td>6</td><td>0.5 %</td><td nowrap="nowrap">61.51 КБ</td><td>0.8 %</td></tr>
</table></td></tr></table><br />
<a name="downloads"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Downloads (Топ 10) - <a href="awstats.vdi.downloads.html" target="awstatsbis">Полный список</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th colspan="2">Downloads: 0</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">206 Хиты</th><th bgcolor="#2EA495" width="80">Объем</th><th bgcolor="#2EA495" width="80">Средний размер</th></tr>
</table></td></tr></table><br />
<a name="urls"> </a><a name="entry"> </a><a name="exit"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Адрес страницы (Топ 10) - <a href="awstats.vdi.urldetail.html" target="awstatsbis">Полный список</a> - <a href="awstats.vdi.urlentry.html" target="awstatsbis">Вхождение</a> - <a href="awstats.vdi.urlexit.html" target="awstatsbis">Выход</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>15 Различные url</th><th bgcolor="#4477DD" width="80">Просмотров</th><th bgcolor="#2EA495" width="80">Средний размер</th><th bgcolor="#CEC2E8" width="80">Вхождение</th><th bgcolor="#C1B2E2" width="80">Выход</th><th> </th></tr>
<tr><td class="aws"><a href="http://vdi/awstats/awstats.pl" target="url" rel="nofollow">/awstats/awstats.pl</a></td><td>428</td><td>8.87 КБ</td><td>4</td><td>3</td><td class="aws"><img src="/awstatsicons/other/hp.png" width="261" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="261" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="3" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="2" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/events" target="url" rel="nofollow">/api/events</a></td><td>86</td><td>1.51 КБ</td><td> </td><td>1</td><td class="aws"><img src="/awstatsicons/other/hp.png" width="53" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="45" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="2" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/pools" target="url" rel="nofollow">/api/pools</a></td><td>39</td><td>656 Байты</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="24" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="19" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/controllers" target="url" rel="nofollow">/api/controllers</a></td><td>30</td><td>524 Байты</td><td>1</td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="19" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="16" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="2" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/license/" target="url" rel="nofollow">/api/license/</a></td><td>16</td><td>646 Байты</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="10" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="19" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/version/" target="url" rel="nofollow">/api/version/</a></td><td>15</td><td>470 Байты</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="10" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="14" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/ws/subscriptions/" target="url" rel="nofollow">/api/ws/subscriptions/</a></td><td>15</td><td>4.55 КБ</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="10" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="134" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/" target="url" rel="nofollow">/</a></td><td>11</td><td>1.90 КБ</td><td>1</td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="7" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="56" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="2" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/resources" target="url" rel="nofollow">/api/resources</a></td><td>6</td><td>577 Байты</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="4" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="17" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><a href="http://vdi/api/settings" target="url" rel="nofollow">/api/settings</a></td><td>3</td><td>527 Байты</td><td> </td><td> </td><td class="aws"><img src="/awstatsicons/other/hp.png" width="2" height="4" alt= title= /><br /><img src="/awstatsicons/other/hk.png" width="16" height="4" alt= title= /><br /><img src="/awstatsicons/other/he.png" width="1" height="4" alt= title= /><br /><img src="/awstatsicons/other/hx.png" width="1" height="4" alt= title= /></td></tr>
<tr><td class="aws"><span style="color: #666688">Остальные</span></td><td>11</td><td>2.94 КБ</td><td> </td><td>1</td><td> </td></tr>
</table></td></tr></table><br />
<a name="os"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Операционные системы (Топ 10) - <a href="awstats.vdi.osdetail.html" target="awstatsbis">Полный список/Версии</a> - <a href="awstats.vdi.unknownos.html" target="awstatsbis">Неизвестный</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th width="32"> </th><th>Операционные системы</th><th bgcolor="#4477DD" width="80">Страницы</th><th bgcolor="#4477DD" width="80">Процент</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">Процент</th></tr>
<tr><td width="32"><img src="/awstatsicons/os/linux.png" alt= title= /></td><td class="aws"><b>Linux</b></td><td>651</td><td>98.6 %</td><td>894</td><td>88.5 %</td></tr>
<tr><td><img src="/awstatsicons/os/win.png" alt= title= /></td><td class="aws"><b>Windows</b></td><td>9</td><td>1.3 %</td><td>116</td><td>11.4 %</td></tr>
</table></td></tr></table><br />
<a name="browsers"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Браузеры (Топ 10) - <a href="awstats.vdi.browserdetail.html" target="awstatsbis">Полный список/Версии</a> - <a href="awstats.vdi.unknownbrowser.html" target="awstatsbis">Неизвестный</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th width="32"> </th><th>Браузеры</th><th width="80">Грабер</th><th bgcolor="#4477DD" width="80">Страницы</th><th bgcolor="#4477DD" width="80">Процент</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">Процент</th></tr>
<tr><td width="32"><img src="/awstatsicons/browser/firefox.png" alt= title= /></td><td class="aws"><b>Firefox</b></td><td>Нет</td><td>651</td><td>98.6 %</td><td>894</td><td>88.5 %</td></tr>
<tr><td><img src="/awstatsicons/browser/chrome.png" alt= title= /></td><td class="aws"><b>Google Chrome</b></td><td>Нет</td><td>9</td><td>1.3 %</td><td>116</td><td>11.4 %</td></tr>
</table></td></tr></table><br />
<a name="refering"> </a>
<a name="referer"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Соединение с сайтом из </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>Происхождение</th><th bgcolor="#4477DD" width="80">Страницы</th><th bgcolor="#4477DD" width="80">Процент</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">Процент</th></tr>
<tr><td class="aws"><b>Прямой адрес / Закладки</b></td><td>72</td><td>11 %</td><td>74</td><td>7.7 %</td></tr>
<tr><td class="aws"><b>Ссылки из поисковых систем</b> - <a href="awstats.vdi.refererse.html" target="awstatsbis">Полный список</a><br />
</td>
<td valign="top"> </td><td valign="top"> </td><td valign="top"> </td><td valign="top"> </td></tr>
<tr><td class="aws"><b>Ссылки из внешней страницы (остальные web сайты исключая поисковые системы)</b> - <a href="awstats.vdi.refererpages.html" target="awstatsbis">Полный список</a><br />
<table>
<tr><td class="aws">- <a href="https://192.168.5.248/awstats/awstats.pl" target="url" rel="nofollow">https://192.168.5.248/awstats/awstats.pl</a></td><td>376</td><td>648</td></tr>
<tr><td class="aws">- <a href="https://192.168.5.248" target="url" rel="nofollow">https://192.168.5.248</a></td><td>203</td><td>236</td></tr>
<tr><td class="aws">- <a href="https://192.168.5.248/cgi-bin/awstats.pl" target="url" rel="nofollow">https://192.168.5.248/cgi-bin/awstats.pl</a></td><td>0</td><td>1</td></tr>
</table></td>
<td valign="top">579</td><td valign="top">88.9 %</td><td valign="top">885</td><td valign="top">92.2 %</td></tr>
<tr><td class="aws"><b>Неизвестное происхождение</b></td><td> </td><td> </td><td> </td><td> </td></tr>
</table></td></tr></table><br />
<a name="keys"> </a>
<a name="keyphrases"> </a><a name="keywords"> </a><br />
<table width="100%" cellpadding="0" cellspacing="0"><tr><td width="50%" valign="top">
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="95%">Поисковые Ключевые фразы (Топ 10)<br /><a href="awstats.vdi.keyphrases.html" target="awstatsbis">Полный список</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>0 Различные ключевые фразы</th><th bgcolor="#8888DD" width="80">Поиск</th><th bgcolor="#8888DD" width="80">Процент</th></tr>
</table></td></tr></table><br />
</td>
<td> </td><td width="50%" valign="top">
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="95%">Поисковые Ключевые слова (Топ 10)<br /><a href="awstats.vdi.keywords.html" target="awstatsbis">Полный список</a> </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>0 различные ключевые слова</th><th bgcolor="#8888DD" width="80">Поиск</th><th bgcolor="#8888DD" width="80">Процент</th></tr>
</table></td></tr></table><br />
</td>
</tr></table>
<a name="other"> </a>
<a name="misc"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Смешанные </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th>Смешанные</th><th width="100"> </th><th width="100"> </th></tr>
<tr><td class="aws">Добавить в закладки (предполагаемый)</td><td>9 / 2 Посетители</td><td>450 %</td></tr>
</table></td></tr></table><br />
<a name="errors"> </a><br />
<table class="aws_border sortable" border="0" cellpadding="2" cellspacing="0" width="100%">
<tr><td class="aws_title" width="70%">Статусы ошибок HTTP </td><td class="aws_blank"> </td></tr>
<tr><td colspan="2">
<table class="aws_data" border="1" cellpadding="2" cellspacing="0" width="100%">
<tr bgcolor="#ECECEC"><th colspan="2">Статусы ошибок HTTP*</th><th bgcolor="#66DDEE" width="80">Хиты</th><th bgcolor="#66DDEE" width="80">Процент</th><th bgcolor="#2EA495" width="80">Объем</th></tr>
<tr><td><a href="awstats.vdi.errors404.html" target="awstatsbis">404</a></td><td class="aws">Document Not Found (hits on favicon excluded)</td><td>2</td><td>100 %</td><td>1.44 КБ</td></tr>
</table></td></tr></table><span style="font: 11px verdana, arial, helvetica;">* Коды отображенные здесь генерируют трафик не отображаемый посетителям, поэтому они не включены в остальную статистику.</span><br />
<br />
<br /><br />
<span dir="ltr" style="font: 11px verdana, arial, helvetica; color: #000000;"><b>Advanced Web Statistics 7.5 (build 20160301)</b> - <a href="http://www.awstats.org" target="awstatshome">Создано awstats</a></span><br />
<br />
</body>
</html>
'''
}
| 119.166405
| 618
| 0.665125
| 13,264
| 75,909
| 3.799231
| 0.043501
| 0.054452
| 0.128986
| 0.094795
| 0.866827
| 0.847677
| 0.829818
| 0.81428
| 0.789951
| 0.76467
| 0
| 0.057892
| 0.07567
| 75,909
| 636
| 619
| 119.353774
| 0.660315
| 0.000817
| 0
| 0.33277
| 0
| 0.655405
| 0.99818
| 0.393096
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003378
| 0
| 0.003378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b6b8f2d08d391a1621662cd793c719c7dcbdce79
| 64
|
py
|
Python
|
backend/factories/__init__.py
|
heidal/apollo
|
576743e12048985ae8ef127224e1cb8ac49acd28
|
[
"MIT"
] | 2
|
2020-02-28T16:24:55.000Z
|
2020-03-27T17:12:50.000Z
|
backend/factories/__init__.py
|
heidal/apollo
|
576743e12048985ae8ef127224e1cb8ac49acd28
|
[
"MIT"
] | 51
|
2020-02-12T20:52:08.000Z
|
2022-02-27T00:23:20.000Z
|
backend/factories/__init__.py
|
heidal/apollo
|
576743e12048985ae8ef127224e1cb8ac49acd28
|
[
"MIT"
] | null | null | null |
from .elections import *  # noqa
from .users import *  # noqa
| 21.333333
| 33
| 0.65625
| 10
| 64
| 4.2
| 0.6
| 0.380952
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 64
| 2
| 34
| 32
| 0.875
| 0.171875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b6bbae04af00bbd61a82163c1307ab044be24aef
| 15,620
|
py
|
Python
|
pymtl3/stdlib/rtl/queues.py
|
hsqforfun/pymtl3
|
05e06601cf262a663a95d1235cb99056ece84580
|
[
"BSD-3-Clause"
] | 1
|
2019-11-12T12:26:01.000Z
|
2019-11-12T12:26:01.000Z
|
pymtl3/stdlib/rtl/queues.py
|
hsqforfun/pymtl3
|
05e06601cf262a663a95d1235cb99056ece84580
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/stdlib/rtl/queues.py
|
hsqforfun/pymtl3
|
05e06601cf262a663a95d1235cb99056ece84580
|
[
"BSD-3-Clause"
] | null | null | null |
"""
-------------------------------------------------------------------------
Library of RTL queues
-------------------------------------------------------------------------
Author : Yanghui Ou
Date : Mar 23, 2019
"""
from pymtl3 import *
from pymtl3.stdlib.ifcs import DeqIfcRTL, EnqIfcRTL
from pymtl3.stdlib.rtl import Mux, RegisterFile
#-------------------------------------------------------------------------
# Dpath and Ctrl for NormalQueueRTL
#-------------------------------------------------------------------------
class NormalQueueDpathRTL( Component ):
def construct( s, EntryType, num_entries=2 ):
# Interface
s.enq_msg = InPort( EntryType )
s.deq_msg = OutPort( EntryType )
s.wen = InPort( Bits1 )
s.waddr = InPort( mk_bits( clog2( num_entries ) ) )
s.raddr = InPort( mk_bits( clog2( num_entries ) ) )
# Component
s.queue = RegisterFile( EntryType, num_entries )(
raddr = { 0: s.raddr },
rdata = { 0: s.deq_msg },
wen = { 0: s.wen },
waddr = { 0: s.waddr },
wdata = { 0: s.enq_msg },
)
class NormalQueueCtrlRTL( Component ):
def construct( s, num_entries=2 ):
# Constants
addr_nbits = clog2 ( num_entries )
count_nbits = clog2 ( num_entries+1 )
PtrType = mk_bits ( addr_nbits )
CountType = mk_bits ( count_nbits )
s.last_idx = PtrType ( num_entries-1 )
s.num_entries = CountType( num_entries )
# Interface
s.enq_en = InPort ( Bits1 )
s.enq_rdy = OutPort( Bits1 )
s.deq_en = InPort ( Bits1 )
s.deq_rdy = OutPort( Bits1 )
s.count = OutPort( CountType )
s.wen = OutPort( Bits1 )
s.waddr = OutPort( PtrType )
s.raddr = OutPort( PtrType )
# Registers
s.head = Wire( PtrType )
s.tail = Wire( PtrType )
# Wires
s.enq_xfer = Wire( Bits1 )
s.deq_xfer = Wire( Bits1 )
s.head_next = Wire( PtrType )
s.tail_next = Wire( PtrType )
# Connections
connect( s.wen, s.enq_xfer )
connect( s.waddr, s.tail )
connect( s.raddr, s.head )
@s.update
def up_rdy_signals():
s.enq_rdy = ( s.count < s.num_entries ) & ~s.reset
s.deq_rdy = ( s.count > CountType(0) ) & ~s.reset
@s.update
def up_xfer_signals():
s.enq_xfer = s.enq_en & s.enq_rdy
s.deq_xfer = s.deq_en & s.deq_rdy
@s.update
def up_next():
s.head_next = s.head + PtrType(1) if s.head < s.last_idx else PtrType(0)
s.tail_next = s.tail + PtrType(1) if s.tail < s.last_idx else PtrType(0)
@s.update_ff
def up_reg():
if s.reset:
s.head <<= PtrType(0)
s.tail <<= PtrType(0)
s.count <<= CountType(0)
else:
s.head <<= s.head_next if s.deq_xfer else s.head
s.tail <<= s.tail_next if s.enq_xfer else s.tail
s.count <<= s.count + CountType(1) if s.enq_xfer & ~s.deq_xfer else \
s.count - CountType(1) if s.deq_xfer & ~s.enq_xfer else \
s.count
#-------------------------------------------------------------------------
# NormalQueueRTL
#-------------------------------------------------------------------------
class NormalQueueRTL( Component ):
def construct( s, EntryType, num_entries=2 ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort( mk_bits( clog2( num_entries+1 ) ) )
# Components
assert num_entries > 0
if num_entries == 1:
s.q = NormalQueue1EntryRTL( EntryType )
connect( s.enq, s.q.enq )
connect( s.deq, s.q.deq )
connect( s.count, s.q.count )
else:
s.ctrl = NormalQueueCtrlRTL ( num_entries )
s.dpath = NormalQueueDpathRTL( EntryType, num_entries )
# Connect ctrl to data path
connect( s.ctrl.wen, s.dpath.wen )
connect( s.ctrl.waddr, s.dpath.waddr )
connect( s.ctrl.raddr, s.dpath.raddr )
# Connect to interface
connect( s.enq.en, s.ctrl.enq_en )
connect( s.enq.rdy, s.ctrl.enq_rdy )
connect( s.deq.en, s.ctrl.deq_en )
connect( s.deq.rdy, s.ctrl.deq_rdy )
connect( s.count, s.ctrl.count )
connect( s.enq.msg, s.dpath.enq_msg )
connect( s.deq.msg, s.dpath.deq_msg )
# Line trace
def line_trace( s ):
return "{}({}){}".format( s.enq, s.count, s.deq )
#-------------------------------------------------------------------------
# Ctrl for PipeQueue
#-------------------------------------------------------------------------
class PipeQueueCtrlRTL( Component ):
def construct( s, num_entries=2 ):
# Constants
addr_nbits = clog2 ( num_entries )
count_nbits = clog2 ( num_entries+1 )
PtrType = mk_bits ( addr_nbits )
CountType = mk_bits ( count_nbits )
s.last_idx = PtrType ( num_entries-1 )
s.num_entries = CountType( num_entries )
# Interface
s.enq_en = InPort ( Bits1 )
s.enq_rdy = OutPort( Bits1 )
s.deq_en = InPort ( Bits1 )
s.deq_rdy = OutPort( Bits1 )
s.count = OutPort( CountType )
s.wen = OutPort( Bits1 )
s.waddr = OutPort( PtrType )
s.raddr = OutPort( PtrType )
# Registers
s.head = Wire( PtrType )
s.tail = Wire( PtrType )
# Wires
s.enq_xfer = Wire( Bits1 )
s.deq_xfer = Wire( Bits1 )
s.head_next = Wire( PtrType )
s.tail_next = Wire( PtrType )
# Connections
connect( s.wen, s.enq_xfer )
connect( s.waddr, s.tail )
connect( s.raddr, s.head )
@s.update
def up_rdy_signals():
s.deq_rdy = ( s.count > CountType(0) ) & ~s.reset
@s.update
def up_enq_rdy():
if s.reset:
s.enq_rdy = b1(0)
else:
s.enq_rdy = ( s.count < s.num_entries ) | s.deq_en
@s.update
def up_xfer_signals():
s.enq_xfer = s.enq_en & s.enq_rdy
s.deq_xfer = s.deq_en & s.deq_rdy
@s.update
def up_next():
s.head_next = s.head + PtrType(1) if s.head < s.last_idx else PtrType(0)
s.tail_next = s.tail + PtrType(1) if s.tail < s.last_idx else PtrType(0)
@s.update_ff
def up_reg():
if s.reset:
s.head <<= PtrType(0)
s.tail <<= PtrType(0)
s.count <<= CountType(0)
else:
s.head <<= s.head_next if s.deq_xfer else s.head
s.tail <<= s.tail_next if s.enq_xfer else s.tail
s.count <<= s.count + CountType(1) if s.enq_xfer & ~s.deq_xfer else \
s.count - CountType(1) if s.deq_xfer & ~s.enq_xfer else \
s.count
#-------------------------------------------------------------------------
# PipeQueueRTL
#-------------------------------------------------------------------------
class PipeQueueRTL( Component ):
def construct( s, EntryType, num_entries=2 ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort( mk_bits( clog2( num_entries+1 ) ) )
# Components
assert num_entries > 0
if num_entries == 1:
s.q = PipeQueue1EntryRTL( EntryType )
connect( s.enq, s.q.enq )
connect( s.deq, s.q.deq )
connect( s.count, s.q.count )
else:
s.ctrl = PipeQueueCtrlRTL ( num_entries )
s.dpath = NormalQueueDpathRTL( EntryType, num_entries )
# Connect ctrl to data path
connect( s.ctrl.wen, s.dpath.wen )
connect( s.ctrl.waddr, s.dpath.waddr )
connect( s.ctrl.raddr, s.dpath.raddr )
# Connect to interface
connect( s.enq.en, s.ctrl.enq_en )
connect( s.enq.rdy, s.ctrl.enq_rdy )
connect( s.deq.en, s.ctrl.deq_en )
connect( s.deq.rdy, s.ctrl.deq_rdy )
connect( s.count, s.ctrl.count )
connect( s.enq.msg, s.dpath.enq_msg )
connect( s.deq.msg, s.dpath.deq_msg )
# Line trace
def line_trace( s ):
return "{}({}){}".format( s.enq, s.count, s.deq )
#-------------------------------------------------------------------------
# Ctrl and Dpath for BypassQueue
#-------------------------------------------------------------------------
class BypassQueueDpathRTL( Component ):
def construct( s, EntryType, num_entries=2 ):
# Interface
s.enq_msg = InPort( EntryType )
s.deq_msg = OutPort( EntryType )
s.wen = InPort( Bits1 )
s.waddr = InPort( mk_bits( clog2( num_entries ) ) )
s.raddr = InPort( mk_bits( clog2( num_entries ) ) )
s.mux_sel = InPort( Bits1 )
# Component
s.queue = RegisterFile( EntryType, num_entries )(
raddr = { 0: s.raddr },
wen = { 0: s.wen },
waddr = { 0: s.waddr },
wdata = { 0: s.enq_msg },
)
s.mux = Mux( EntryType, 2 )(
sel = s.mux_sel,
in_ = { 0: s.queue.rdata[0], 1: s.enq_msg },
out = s.deq_msg,
)
class BypassQueueCtrlRTL( Component ):
def construct( s, num_entries=2 ):
# Constants
addr_nbits = clog2 ( num_entries )
count_nbits = clog2 ( num_entries+1 )
PtrType = mk_bits ( addr_nbits )
CountType = mk_bits ( count_nbits )
s.last_idx = PtrType ( num_entries-1 )
s.num_entries = CountType( num_entries )
# Interface
s.enq_en = InPort ( Bits1 )
s.enq_rdy = OutPort( Bits1 )
s.deq_en = InPort ( Bits1 )
s.deq_rdy = OutPort( Bits1 )
s.count = OutPort( CountType )
s.wen = OutPort( Bits1 )
s.waddr = OutPort( PtrType )
s.raddr = OutPort( PtrType )
s.mux_sel = OutPort( Bits1 )
# Registers
s.head = Wire( PtrType )
s.tail = Wire( PtrType )
# Wires
s.enq_xfer = Wire( Bits1 )
s.deq_xfer = Wire( Bits1 )
s.head_next = Wire( PtrType )
s.tail_next = Wire( PtrType )
# Connections
connect( s.wen, s.enq_xfer )
connect( s.waddr, s.tail )
connect( s.raddr, s.head )
@s.update
def up_enq_rdy():
s.enq_rdy = ( s.count < s.num_entries ) & ~s.reset
@s.update
def up_deq_rdy():
if s.reset:
s.deq_rdy = b1(0)
else:
s.deq_rdy = ( s.count > CountType(0) ) | s.enq_en
@s.update
def up_mux_sel():
s.mux_sel = s.count == CountType(0)
@s.update
def up_xfer_signals():
s.enq_xfer = s.enq_en & s.enq_rdy
s.deq_xfer = s.deq_en & s.deq_rdy
@s.update
def up_next():
s.head_next = s.head + PtrType(1) if s.head < s.last_idx else PtrType(0)
s.tail_next = s.tail + PtrType(1) if s.tail < s.last_idx else PtrType(0)
@s.update_ff
def up_reg():
if s.reset:
s.head <<= PtrType(0)
s.tail <<= PtrType(0)
s.count <<= CountType(0)
else:
s.head <<= s.head_next if s.deq_xfer else s.head
s.tail <<= s.tail_next if s.enq_xfer else s.tail
s.count <<= s.count + CountType(1) if s.enq_xfer & ~s.deq_xfer else \
s.count - CountType(1) if s.deq_xfer & ~s.enq_xfer else \
s.count
#-------------------------------------------------------------------------
# BypassQueueRTL
#-------------------------------------------------------------------------
class BypassQueueRTL( Component ):
def construct( s, EntryType, num_entries=2 ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort( mk_bits( clog2( num_entries+1 ) ) )
# Components
assert num_entries > 0
if num_entries == 1:
s.q = BypassQueue1EntryRTL( EntryType )
connect( s.enq, s.q.enq )
connect( s.deq, s.q.deq )
connect( s.count, s.q.count )
else:
s.ctrl = BypassQueueCtrlRTL ( num_entries )
s.dpath = BypassQueueDpathRTL( EntryType, num_entries )
# Connect ctrl to data path
connect( s.ctrl.wen, s.dpath.wen )
connect( s.ctrl.waddr, s.dpath.waddr )
connect( s.ctrl.raddr, s.dpath.raddr )
connect( s.ctrl.mux_sel, s.dpath.mux_sel )
# Connect to interface
connect( s.enq.en, s.ctrl.enq_en )
connect( s.enq.rdy, s.ctrl.enq_rdy )
connect( s.deq.en, s.ctrl.deq_en )
connect( s.deq.rdy, s.ctrl.deq_rdy )
connect( s.count, s.ctrl.count )
connect( s.enq.msg, s.dpath.enq_msg )
connect( s.deq.msg, s.dpath.deq_msg )
# Line trace
def line_trace( s ):
return "{}({}){}".format( s.enq, s.count, s.deq )
#-------------------------------------------------------------------------
# NormalQueue1EntryRTL
#-------------------------------------------------------------------------
class NormalQueue1EntryRTL( Component ):
def construct( s, EntryType ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort ( Bits1 )
# Components
s.entry = Wire( EntryType )
s.full = Wire( Bits1 )
connect( s.count, s.full )
# Logic
@s.update_ff
def up_full():
if s.reset:
s.full <<= b1(0)
else:
s.full <<= ~s.deq.en & (s.enq.en | s.full)
@s.update_ff
def up_entry():
if s.enq.en:
s.entry <<= s.enq.msg
@s.update
def up_enq_rdy():
if s.reset:
s.enq.rdy = b1(0)
else:
s.enq.rdy = ~s.full
@s.update
def up_deq_rdy():
s.deq.rdy = s.full & ~s.reset
connect( s.entry, s.deq.msg )
def line_trace( s ):
return "{}({}){}".format( s.enq, s.full, s.deq )
#-------------------------------------------------------------------------
# PipeQueue1EntryRTL
#-------------------------------------------------------------------------
class PipeQueue1EntryRTL( Component ):
def construct( s, EntryType ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort ( Bits1 )
# Components
s.entry = Wire( EntryType )
s.full = Wire( Bits1 )
connect( s.count, s.full )
# Logic
@s.update_ff
def up_full():
if s.reset:
s.full <<= b1(0)
else:
s.full <<= s.enq.en | s.full & ~s.deq.en
@s.update_ff
def up_entry():
if s.enq.en:
s.entry <<= s.enq.msg
@s.update
def up_enq_rdy():
s.enq.rdy = ( ~s.full | s.deq.en ) & ~s.reset
@s.update
def up_deq_rdy():
s.deq.rdy = s.full & ~s.reset
connect( s.entry, s.deq.msg )
def line_trace( s ):
return "{}({}){}".format( s.enq, s.full, s.deq )
#-------------------------------------------------------------------------
# BypassQueue1EntryRTL
#-------------------------------------------------------------------------
class BypassQueue1EntryRTL( Component ):
def construct( s, EntryType ):
# Interface
s.enq = EnqIfcRTL( EntryType )
s.deq = DeqIfcRTL( EntryType )
s.count = OutPort ( Bits1 )
# Components
s.entry = Wire( EntryType )
s.full = Wire( Bits1 )
connect( s.count, s.full )
# Logic
@s.update_ff
def up_full():
if s.reset:
s.full <<= b1(0)
else:
s.full <<= ~s.deq.en & (s.enq.en | s.full)
@s.update_ff
def up_entry():
if s.enq.en & ~s.deq.en:
s.entry <<= s.enq.msg
@s.update
def up_enq_rdy():
s.enq.rdy = ~s.full & ~s.reset
@s.update
def up_deq_rdy():
s.deq.rdy = ( s.full | s.enq.en ) & ~s.reset
@s.update
def up_deq_msg():
s.deq.msg = s.entry if s.full else s.enq.msg
def line_trace( s ):
return "{}({}){}".format( s.enq, s.full, s.deq )
| 25.648604
| 78
| 0.509219
| 2,019
| 15,620
| 3.811293
| 0.053492
| 0.041066
| 0.024691
| 0.02963
| 0.88603
| 0.878622
| 0.875244
| 0.875114
| 0.866537
| 0.864327
| 0
| 0.012512
| 0.278553
| 15,620
| 608
| 79
| 25.690789
| 0.670335
| 0.141933
| 0
| 0.870166
| 0
| 0
| 0.003602
| 0
| 0
| 0
| 0
| 0
| 0.008287
| 1
| 0.124309
| false
| 0.019337
| 0.008287
| 0.016575
| 0.179558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6f443c89de17865d2aff7e7d3e5c479bc0ccb12
| 92
|
py
|
Python
|
taggable/__init__.py
|
ville-k/taggable
|
d37ecdfba5bc09277f19ea51344ad2ea3ae100f2
|
[
"MIT"
] | null | null | null |
taggable/__init__.py
|
ville-k/taggable
|
d37ecdfba5bc09277f19ea51344ad2ea3ae100f2
|
[
"MIT"
] | null | null | null |
taggable/__init__.py
|
ville-k/taggable
|
d37ecdfba5bc09277f19ea51344ad2ea3ae100f2
|
[
"MIT"
] | null | null | null |
from .taggable_sequence import TaggableSequence
from .taggable_sequence import TaggedSegment
| 46
| 47
| 0.902174
| 10
| 92
| 8.1
| 0.6
| 0.296296
| 0.493827
| 0.641975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076087
| 92
| 2
| 48
| 46
| 0.952941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8e39841edef995cf39cc312dc6a7be388770fc39
| 96
|
py
|
Python
|
backend/optimizer.py
|
MaxLinCode/tardy-HackIllinois-2017
|
b38ad13e9046bb20814c60e9c1759b60ec709391
|
[
"MIT"
] | null | null | null |
backend/optimizer.py
|
MaxLinCode/tardy-HackIllinois-2017
|
b38ad13e9046bb20814c60e9c1759b60ec709391
|
[
"MIT"
] | null | null | null |
backend/optimizer.py
|
MaxLinCode/tardy-HackIllinois-2017
|
b38ad13e9046bb20814c60e9c1759b60ec709391
|
[
"MIT"
] | null | null | null |
def ind_cost(guess, actual):
    return max(abs(guess - actual), (86400 - abs(guess - actual)))
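# Quick sanity check: the inputs are presumably times in seconds within a
# 24-hour day (86400 s), so the second max() branch is the wrap-around
# complement of the first.
if __name__ == "__main__":
    assert ind_cost(600, 300) == 86100   # max(300, 86400 - 300)
    assert ind_cost(0, 43200) == 43200   # both branches meet at 12 h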
| 32
| 66
| 0.666667
| 14
| 96
| 4.5
| 0.642857
| 0.52381
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.166667
| 96
| 2
| 67
| 48
| 0.725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6d40792fa1fedb6ee87c31e6fb93bfa09365633c
| 13,551
|
py
|
Python
|
ghostwriter/ghtest/test_postarticle.py
|
arthurmco/ghostwriter
|
f6040846c28a08bf1b39de19bd2470772eb89080
|
[
"MIT"
] | null | null | null |
ghostwriter/ghtest/test_postarticle.py
|
arthurmco/ghostwriter
|
f6040846c28a08bf1b39de19bd2470772eb89080
|
[
"MIT"
] | 4
|
2017-07-23T02:12:39.000Z
|
2017-10-01T03:55:02.000Z
|
ghostwriter/ghtest/test_postarticle.py
|
arthurmco/ghostwriter
|
f6040846c28a08bf1b39de19bd2470772eb89080
|
[
"MIT"
] | null | null | null |
import unittest
from ghostwriter import app, mm
#
# Post basic test fixture(?)
# Copyright (C) 2017 Arthur M
#
class PostArticleTestCase(unittest.TestCase):
    from flask import json
    def setUp(self):
        mm.setDatabaseURI('sqlite:////tmp/unittest.db')
        mm.init()
        mm.create()
        self.app = app.test_client()
        self.username = ""
        self.password = ""
        self.create_user()
    def tearDown(self):
        mm.drop()
    def create_user(self):
        from ghostwriter.User import User
        from ghostwriter.UserManager import UserManager
        self.username = 'malakoi'
        self.password = 'dandoboura'
        u = User(self.username)
        umng = UserManager()
        umng.addUser(u, self.password)
    def authenticate(self):
        res = self.app.post('/admin/login',
                            data = {
                                'username': self.username,
                                'password': self.password
                            }, follow_redirects=True)
        self.assertEqual(res.status, "200 OK")
    def deauthenticate(self):
        res = self.app.get('/admin/logoff', follow_redirects=True)
        self.assertEqual(res.status, "200 OK")
    def test_create_blog_post_unauthenticated(self):
        res = self.app.post('/api/post/create/',
                            data = {
                                'title': "This won't work"
                            }, follow_redirects=True)
        self.assertEqual(res.status, "401 UNAUTHORIZED")
    def test_create_blog_post_authenticated(self):
        self.authenticate()
        res = self.app.post('/api/post/create/',
                            data = {
                                'title': "This will work"
                            }, follow_redirects=True)
        self.assertEqual(res.status, "200 OK")
        self.deauthenticate()
    def test_create_and_read_blog_post(self):
        from flask import json
        self.authenticate()
        res = self.app.post('/api/post/create/',
                            data = {
                                'title': "This will maybe work"
                            }, follow_redirects=True)
        self.assertEqual(res.status, "200 OK")
        create_post_data = json.loads(res.data)
        res = self.app.get('/api/post/'+str(create_post_data['id'])+'/',
                           follow_redirects=True)
        get_post_data = json.loads(res.data)
        self.assertEqual(get_post_data['id'], create_post_data['id'])
        self.assertEqual(get_post_data['title'], create_post_data['title'])
        self.assertEqual(get_post_data['creation_date'], create_post_data['creation_date'])
        self.assertEqual(get_post_data['summary'], create_post_data['summary'])
        self.assertEqual(1, get_post_data['owner']['id'])
        self.assertEqual(self.username, get_post_data['owner']['name'])
        self.deauthenticate()
    def test_get_content(self):
        self.authenticate()
        from ghostwriter.Post import Post, PostManager
        from flask import json
        p = Post(1, 'Get Content Test')
        p.setContent('Post content')
        pm = PostManager()
        pm.addPost(p)
        res = self.app.get('/api/post/'+str(p.ID)+'/content',
                           follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = res.data
        self.assertEqual(b'Post content', post_data)
        self.deauthenticate()
    def test_set_and_get_content(self):
        self.authenticate()
        from ghostwriter.Post import Post, PostManager
        from flask import json
        p = Post(1, 'Get Content Test')
        p.setContent('Post content')
        pm = PostManager()
        pm.addPost(p)
        res = self.app.put('/api/post/'+str(p.ID)+'/content',
                           data = {
                               'content': 'New Post content'
                           },
                           follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        res = self.app.get('/api/post/'+str(p.ID)+'/content',
                           follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = res.data
        self.assertEqual(b'New Post content', post_data)
        self.deauthenticate()
    def test_set_and_get_metadata(self):
        self.authenticate()
        from ghostwriter.Post import Post, PostManager
        from flask import json
        p = Post(1, 'Get Meta Test')
        p.setContent('Post content')
        pm = PostManager()
        pm.addPost(p)
        res = self.app.put('/api/post/'+str(p.ID)+'/',
                           data = {
                               'title': 'New Meta Test'
                           },
                           follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        res = self.app.get('/api/post/'+str(p.ID)+'/',
                           follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual('New Meta Test', post_data['title'])
        self.deauthenticate()
    def test_delete_blog_post(self):
        self.authenticate()
        from ghostwriter.Post import Post, PostManager
        from flask import json
        p = Post(1, 'Get Content Test')
        p.setContent('Post content')
        pm = PostManager()
        pm.addPost(p)
        res = self.app.delete('/api/post/'+str(p.ID)+'/',
                              follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        res = self.app.delete('/api/post/'+str(p.ID)+'/',
                              follow_redirects=True)
        self.assertEqual(res.status, '404 NOT FOUND')
#
# Post composition
class PostComposeTestCase(unittest.TestCase):
    from flask import json
    def setUp(self):
        mm.setDatabaseURI('sqlite:////tmp/unittest.db')
        mm.init()
        mm.create()
        self.app = app.test_client()
        self.user = self.create_user('test', 'test')
    def tearDown(self):
        mm.drop()
    def create_user(self, username, password):
        from ghostwriter.User import User
        from ghostwriter.UserManager import UserManager
        u = User(username)
        umng = UserManager()
        umng.addUser(u, password)
        return u
    def create_post(self, title, body, author, cdate=None):
        from ghostwriter.Post import Post, PostManager
        po = Post(author.uid, title, cdate)
        po.setContent(body)
        return po
    def testIfSummaryCorrect(self):
        from ghostwriter.Post import Post
        p = self.create_post("New Post",
                             """ This is a big summary
Note that we will have a lot of lines, but it finish here.
Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet """, self.user)
        cdata = p.getSummary()
        self.assertEqual('.', cdata[-1])
        self.assertNotEqual('...', cdata[-3:])
#
# Post search tests
class PostSearchTestCase(unittest.TestCase):
    from flask import json
    def setUp(self):
        mm.setDatabaseURI('sqlite:////tmp/unittest.db')
        mm.init()
        mm.create()
        self.app = app.test_client()
        self.user = self.create_user('test', 'test')
    def tearDown(self):
        mm.drop()
    def create_user(self, username, password):
        from ghostwriter.User import User
        from ghostwriter.UserManager import UserManager
        u = User(username)
        umng = UserManager()
        umng.addUser(u, password)
        return u
    def create_post(self, title, body, author, cdate=None):
        from ghostwriter.Post import Post, PostManager
        po = Post(author.uid, title, cdate)
        po.setContent(body)
        pm = PostManager()
        pm.addPost(po)
    def test_searchbyTitle(self):
        import json
        self.create_post("Search One", "Post Search One", self.user)
        self.create_post("Normal One", "Post Normal One", self.user)
        self.create_post("Search Two", "Post Search Two", self.user)
        self.create_post("Normal Two", "Post Normal Two", self.user)
        self.create_post("Search THree", "Post Search Three", self.user)
        self.create_post("Normal Three", "Post Normal Three", self.user)
        self.create_post("What is this", "Post different", self.user)
        res = self.app.get('/api/post/search',
                           query_string = {
                               'title': 'Search'
                           }, follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(3, len(post_data))
    def test_searchAllNoneFound(self):
        import json
        other = self.create_user('other', 'other')
        res = self.app.get('/api/posts', follow_redirects=True)
        self.assertEqual(res.status, '404 NOT FOUND')
    def test_searchAll(self):
        import json
        other = self.create_user('other', 'other')
        self.create_post("Search One", "Post Search One", self.user)
        self.create_post("Normal One", "Post Normal One", self.user)
        self.create_post("Search Two", "Post Search Two", other)
        self.create_post("Normal Two", "Post Normal Two", other)
        self.create_post("Search THree", "Post Search Three", other)
        self.create_post("Normal Three", "Post Normal Three", other)
        self.create_post("What is this", "Post different", self.user)
        res = self.app.get('/api/posts', follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(7, len(post_data))
    def test_searchbyAuthor(self):
        import json
        other = self.create_user('other', 'other')
        self.create_post("Search One", "Post Search One", self.user)
        self.create_post("Normal One", "Post Normal One", self.user)
        self.create_post("Search Two", "Post Search Two", other)
        self.create_post("Normal Two", "Post Normal Two", other)
        self.create_post("Search THree", "Post Search Three", other)
        self.create_post("Normal Three", "Post Normal Three", other)
        self.create_post("What is this", "Post different", self.user)
        res = self.app.get('/api/user/1/posts', follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(3, len(post_data))
    def test_searchbyDate(self):
        from datetime import datetime
        import json
        self.create_post("Search One", "Post Search One", self.user,
                         datetime(2017, 7, 1, 1))
        self.create_post("Normal One", "Post Normal One", self.user)
        self.create_post("Search Two", "Post Search Two", self.user,
                         datetime(2017, 7, 1, 2))
        self.create_post("Normal Two", "Post Normal Two", self.user)
        self.create_post("Search THree", "Post Search Three", self.user,
                         datetime(2017, 7, 1, 3))
        self.create_post("Normal Three", "Post Normal Three", self.user)
        self.create_post("What is this", "Post different", self.user,
                         datetime(2017, 7, 1, 4))
        res = self.app.get('/api/post/search',
                           query_string = {
                               'cdate': '2017-7-1',
                           }, follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(4, len(post_data))
    def test_searchbyTitleandAuthor(self):
        other = self.create_user('other', 'other')
        import json
        self.create_post("Search One", "Post Search One", self.user)
        self.create_post("Normal One", "Post Normal One", self.user)
        self.create_post("Search Two", "Post Search Two", other)
        self.create_post("Normal Two", "Post Normal Two", other)
        self.create_post("Search THree", "Post Search Three", self.user)
        self.create_post("Normal Three", "Post Normal Three", other)
        self.create_post("What is this", "Post different", self.user)
        res = self.app.get('/api/user/1/posts/search',
                           query_string = {
                               'title': 'Search',
                           }, follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(2, len(post_data))
    def test_searchbyDateandAuthor(self):
        from datetime import datetime
        import json
        other = self.create_user('other', 'other')
        self.create_post("Search One", "Post Search One", other,
                         datetime(2017, 7, 1, 1))
        self.create_post("Normal One", "Post Normal One", other)
        self.create_post("Search Two", "Post Search Two", self.user,
                         datetime(2017, 7, 1, 2))
        self.create_post("Normal Two", "Post Normal Two", other)
        self.create_post("Search THree", "Post Search Three", self.user,
                         datetime(2017, 7, 1, 3))
        self.create_post("Normal Three", "Post Normal Three", other)
        self.create_post("What is this", "Post different", self.user,
                         datetime(2017, 7, 1, 4))
        res = self.app.get('/api/user/1/posts/search',
                           query_string = {
                               'cdate': '2017-7-1',
                           }, follow_redirects=True)
        self.assertEqual(res.status, '200 OK')
        post_data = json.loads(res.data)
        self.assertEqual(3, len(post_data))
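# These cases can be run with the standard unittest runner, assuming the
# ghostwriter package is importable and /tmp is writable for the sqlite
# fixture (the module path below is inferred from the file location, not
# confirmed by the source):
#
#   python -m unittest ghostwriter.ghtest.test_postarticle -v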
| 36.23262
| 278
| 0.590436
| 1,643
| 13,551
| 4.77115
| 0.093731
| 0.065059
| 0.076796
| 0.055747
| 0.842072
| 0.811711
| 0.799592
| 0.789004
| 0.778033
| 0.766679
| 0
| 0.01497
| 0.285219
| 13,551
| 373
| 279
| 36.329759
| 0.794342
| 0.007011
| 0
| 0.741497
| 0
| 0
| 0.161618
| 0.009637
| 0
| 0
| 0
| 0
| 0.122449
| 1
| 0.095238
| false
| 0.027211
| 0.108844
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d573bc453dd58a54c399a376e1f87e3d35c0f8e
| 11,488
|
py
|
Python
|
smirk/migrations/0001_initial.py
|
ahmsayat/CyberSeed
|
1c9368eb3849ec3e0e02c600bdf813cedcaa88c7
|
[
"MIT"
] | null | null | null |
smirk/migrations/0001_initial.py
|
ahmsayat/CyberSeed
|
1c9368eb3849ec3e0e02c600bdf813cedcaa88c7
|
[
"MIT"
] | 5
|
2017-09-26T05:12:06.000Z
|
2017-10-27T05:32:27.000Z
|
smirk/migrations/0001_initial.py
|
ahmsayat/CyberSeed
|
1c9368eb3849ec3e0e02c600bdf813cedcaa88c7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-10-08 01:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Diagnosis_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now=True, verbose_name='Date of exam')),
                ('Diagnosis', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Doctor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Practice_Name', models.CharField(max_length=200)),
                ('Practice_Address', models.CharField(max_length=200)),
                ('Recovery_Phrase', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Doctor_Exam_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now=True, verbose_name='Date of exam')),
                ('Notes', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Doctor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Insurance_Administrator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Company_Name', models.CharField(max_length=200)),
                ('Company_Address', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Insurance_Claim_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now=True, verbose_name='Date of exam')),
                ('Amount', models.FloatField(default=0.0)),
                ('Status', models.CharField(choices=[('Filed', 'Filed'), ('Examining', 'Examining'), ('Rejected', 'Rejected'), ('Accepted', 'Accepted'), ('Paid', 'Paid')], max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Medical_Administrator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Medical_Administrator_handling_claim_for_doctor', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Medical_Administrator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Practice_Name', models.CharField(max_length=200)),
                ('Practice_Address', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Associated_Doctors', models.ManyToManyField(to='smirk.Doctor')),
            ],
        ),
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now=True, verbose_name='Note Date')),
                ('Text', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
            ],
        ),
        migrations.CreateModel(
            name='Nurse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Practice_Name', models.CharField(max_length=200)),
                ('Practice_Address', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Associated_Doctors', models.ManyToManyField(to='smirk.Doctor')),
                ('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('SSN', models.CharField(max_length=200)),
                ('Address', models.CharField(max_length=200)),
                ('DOB', models.DateTimeField(verbose_name='Date')),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Patient_Doctor_Correspondence_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Doctor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Doctor', to=settings.AUTH_USER_MODEL)),
                ('Notes', models.ManyToManyField(to='smirk.Note')),
            ],
        ),
        migrations.CreateModel(
            name='Raw_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Description', models.CharField(max_length=200)),
                ('File', models.FileField(upload_to='documents')),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
            ],
        ),
        migrations.CreateModel(
            name='Record',
            fields=[
                ('Record_ID', models.AutoField(primary_key=True, serialize=False)),
                ('Record_Type', models.CharField(choices=[(b'Doctor Exam', b'Doctor Exam'), (b'Test Result', b'Test Result'), (b'Diagnosis', b'Diagnosis'), (b'Insurance Claim', b'Insurance Claim'), (b'Patient Doctor Correspondence', b'Patient Doctor Correspondence'), (b'Raw', b'Raw')], default='Doctor Exam', max_length=200)),
                ('Record_Date', models.DateTimeField(auto_now=True, verbose_name='Record_Date')),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Edit_Permissions', models.ManyToManyField(related_name='Edit_Permissions', to=settings.AUTH_USER_MODEL)),
                ('Owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Owner', to=settings.AUTH_USER_MODEL)),
                ('Patient', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Patient', to=settings.AUTH_USER_MODEL)),
                ('View_Permissions', models.ManyToManyField(related_name='View_Permissions', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='System_Administrator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(verbose_name='Date')),
            ],
        ),
        migrations.CreateModel(
            name='Test_Results_Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now=True, verbose_name='Date of exam')),
                ('Lab', models.CharField(max_length=200)),
                ('Notes', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Date')),
                ('Doctor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('Record', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record')),
            ],
        ),
        migrations.AddField(
            model_name='raw_record',
            name='Record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record'),
        ),
        migrations.AddField(
            model_name='patient_doctor_correspondence_record',
            name='Record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record'),
        ),
        migrations.AddField(
            model_name='note',
            name='Patient_Doctor_Correspondence',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Patient_Doctor_Correspondence_Record'),
        ),
        migrations.AddField(
            model_name='medical_administrator',
            name='Associated_Nurses',
            field=models.ManyToManyField(to='smirk.Nurse'),
        ),
        migrations.AddField(
            model_name='medical_administrator',
            name='username',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='insurance_claim_record',
            name='Record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record'),
        ),
        migrations.AddField(
            model_name='doctor_exam_record',
            name='Record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record'),
        ),
        migrations.AddField(
            model_name='diagnosis_record',
            name='Record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smirk.Record'),
        ),
    ]
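# Assuming the app is registered in INSTALLED_APPS as "smirk", this
# initial schema would be applied with Django's standard command:
#
#   python manage.py migrate smirk 0001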
| 56.591133
| 327
| 0.613945
| 1,207
| 11,488
| 5.64623
| 0.101906
| 0.054879
| 0.041086
| 0.064563
| 0.82876
| 0.772854
| 0.745708
| 0.730741
| 0.710051
| 0.710051
| 0
| 0.008642
| 0.244516
| 11,488
| 202
| 328
| 56.871287
| 0.776587
| 0.005745
| 0
| 0.680412
| 1
| 0
| 0.14651
| 0.029863
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020619
| 0
| 0.041237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6433335105492dc7ca38ff197d0006d04f33f62
| 109
|
py
|
Python
|
formal_grammars/color.py
|
magnickolas/formal-grammars
|
65fbee79f893876f9fb390b04aa11c037613a8f5
|
[
"MIT"
] | null | null | null |
formal_grammars/color.py
|
magnickolas/formal-grammars
|
65fbee79f893876f9fb390b04aa11c037613a8f5
|
[
"MIT"
] | null | null | null |
formal_grammars/color.py
|
magnickolas/formal-grammars
|
65fbee79f893876f9fb390b04aa11c037613a8f5
|
[
"MIT"
] | null | null | null |
def green(text):
    return f"\033[92m{text}\033[0m"

def yellow(text):
    return f"\033[93m{text}\033[0m"
| 15.571429
| 35
| 0.633028
| 20
| 109
| 3.45
| 0.5
| 0.289855
| 0.318841
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197802
| 0.165138
| 109
| 6
| 36
| 18.166667
| 0.56044
| 0
| 0
| 0
| 0
| 0
| 0.385321
| 0.385321
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
b6a50f3337ebf02efe8d161b7e75d578ff634d7b
| 4,725
|
py
|
Python
|
experiment-3/make_design_files_for_power_analyses.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | 1
|
2021-12-20T13:30:23.000Z
|
2021-12-20T13:30:23.000Z
|
experiment-3/make_design_files_for_power_analyses.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | 14
|
2020-12-21T15:58:45.000Z
|
2022-03-16T22:20:25.000Z
|
experiment-3/make_design_files_for_power_analyses.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | null | null | null |
"""
"""
import os.path as op
from glob import glob
from os import mkdir
from shutil import copyfile
def make_image_file():
"""Write the group-level design file for the visual task (HCP working memory, 0-back vs. fixation)."""
design_file = "design.fsf"
# Template lines appended once per input file
gp_mem = "# Group membership for input {0}\nset fmri(groupmem.{0}) 1\n"
hi_thing = "# Higher-level EV value for EV 1 and input {0}\nset fmri(evg{0}.1) 1\n"
f_thing = '# 4D AVW data or FEAT directory ({n})\nset feat_files({n}) "{f}"\n'
# Template lines appended once per design
n_fls = "# Number of first-level analyses\nset fmri(multiple) {0}\n"
out = '# Output directory\nset fmri(outputdir) "{0}"\n'
n_vols = "# Total volumes\nset fmri(npts) {0}\n"
# Collect subjects that have a completed second-level FEAT directory
in_dir = "/home/data/hcp/"
subdir = "MNINonLinear/Results/tfMRI_WM/tfMRI_WM_hp200_s4_level2vol.feat"
subjects = glob(op.join(in_dir, "*"))
subjects = [op.basename(s) for s in subjects]
subjects = sorted([s for s in subjects if s.isdigit()])
feat_dirs = []
for s in subjects:
feat_dir = op.join(in_dir, s, subdir)
if op.isdir(feat_dir):
feat_dirs.append(feat_dir)
n = len(feat_dirs)
# 0back - fixation
cope_files = [op.join(fd, "cope10.feat/stats/cope1.nii.gz") for fd in feat_dirs]
with open(design_file, "r") as fo:
data = fo.read()
out_dir = "/scratch/tsalo006/visual/"
if not op.isdir(out_dir):
mkdir(out_dir)
data += n_fls.format(n)
data += "\n"
data += out.format(out_dir)
data += "\n"
data += n_vols.format(n)
data += "\n"
for i, f in enumerate(cope_files):
data += gp_mem.format(i + 1)
data += "\n"
data += hi_thing.format(i + 1)
data += "\n"
data += f_thing.format(f=f, n=i + 1)
data += "\n"
with open(op.join(out_dir, "visual_power_analysis_design.fsf"), "w") as fo:
fo.write(data)
copyfile(
op.join(out_dir, "visual_power_analysis_design.fsf"),
"visual_power_analysis_design.fsf",
)
def make_fingertapping_files():
"""Write group-level design files for left- and right-hand finger tapping (HCP motor task)."""
design_file = "design.fsf"
# Template lines appended once per input file
gp_mem = "# Group membership for input {0}\nset fmri(groupmem.{0}) 1\n"
hi_thing = "# Higher-level EV value for EV 1 and input {0}\nset fmri(evg{0}.1) 1\n"
f_thing = '# 4D AVW data or FEAT directory ({n})\nset feat_files({n}) "{f}"\n'
# Template lines appended once per design
n_fls = "# Number of first-level analyses\nset fmri(multiple) {0}\n"
out = '# Output directory\nset fmri(outputdir) "{0}"\n'
n_vols = "# Total volumes\nset fmri(npts) {0}\n"
# Collect subjects that have a completed second-level FEAT directory
in_dir = "/home/data/hcp/"
subdir = "MNINonLinear/Results/tfMRI_MOTOR/tfMRI_MOTOR_hp200_s4_level2vol.feat"
subjects = glob(op.join(in_dir, "*"))
subjects = [op.basename(s) for s in subjects]
subjects = sorted([s for s in subjects if s.isdigit()])
# Contrast 10 is LH-AVG
# Contrast 12 is RH-AVG
feat_dirs = []
for s in subjects:
feat_dir = op.join(in_dir, s, subdir)
if op.isdir(feat_dir):
feat_dirs.append(feat_dir)
n = len(feat_dirs)
# Left hand
cope_files = [op.join(fd, "cope10.feat/stats/cope1.nii.gz") for fd in feat_dirs]
with open(design_file, "r") as fo:
data = fo.read()
out_dir = "/scratch/tsalo006/motor-lh/"
if not op.isdir(out_dir):
mkdir(out_dir)
data += n_fls.format(n)
data += "\n"
data += out.format(out_dir)
data += "\n"
data += n_vols.format(n)
data += "\n"
for i, f in enumerate(cope_files):
data += gp_mem.format(i + 1)
data += "\n"
data += hi_thing.format(i + 1)
data += "\n"
data += f_thing.format(f=f, n=i + 1)
data += "\n"
with open(op.join(out_dir, "motor_lh_power_analysis_design.fsf"), "w") as fo:
fo.write(data)
copyfile(
op.join(out_dir, "motor_lh_power_analysis_design.fsf"),
"motor_lh_power_analysis_design.fsf",
)
# Right hand
cope_files = [op.join(fd, "cope12.feat/stats/cope1.nii.gz") for fd in feat_dirs]
with open(design_file, "r") as fo:
data = fo.read()
out_dir = "/scratch/tsalo006/motor-rh/"
if not op.isdir(out_dir):
mkdir(out_dir)
data += n_fls.format(n)
data += "\n"
data += out.format(out_dir)
data += "\n"
data += n_vols.format(n)
data += "\n"
for i, f in enumerate(cope_files):
data += gp_mem.format(i + 1)
data += "\n"
data += hi_thing.format(i + 1)
data += "\n"
data += f_thing.format(f=f, n=i + 1)
data += "\n"
with open(op.join(out_dir, "motor_rh_power_analysis_design.fsf"), "w") as fo:
fo.write(data)
copyfile(
op.join(out_dir, "motor_rh_power_analysis_design.fsf"),
"motor_rh_power_analysis_design.fsf",
)
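# Entry-point sketch (hypothetical; the module as written only defines the two
# functions and never calls them):
# if __name__ == "__main__":
#     make_image_file()
#     make_fingertapping_files()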
| 29.347826
| 87
| 0.593439
| 736
| 4,725
| 3.641304
| 0.164402
| 0.044776
| 0.040299
| 0.023507
| 0.920896
| 0.910448
| 0.879478
| 0.879478
| 0.879478
| 0.866791
| 0
| 0.018729
| 0.25418
| 4,725
| 160
| 88
| 29.53125
| 0.741771
| 0.027725
| 0
| 0.794872
| 0
| 0.017094
| 0.299235
| 0.130929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017094
| false
| 0
| 0.034188
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcb05adc2cd8fa6c070a1e0a8534bb981f26cc55
| 2,773
|
py
|
Python
|
djangocms_charts/migrations/0002_add_chart_position.py
|
l1f7/djangocms-charts
|
6de3d35758da39bda175406817ddbd1b0b0d5c59
|
[
"MIT"
] | 5
|
2019-04-14T01:28:22.000Z
|
2020-11-09T10:48:13.000Z
|
djangocms_charts/migrations/0002_add_chart_position.py
|
mcldev/djangocms-charts
|
3c10286612af5b2f6179af8e7dc7e10407fe6f6e
|
[
"MIT"
] | 1
|
2017-07-11T19:08:01.000Z
|
2018-12-22T15:38:39.000Z
|
djangocms_charts/migrations/0002_add_chart_position.py
|
mcldev/djangocms-charts
|
3c10286612af5b2f6179af8e7dc7e10407fe6f6e
|
[
"MIT"
] | 4
|
2019-07-05T05:36:53.000Z
|
2021-01-08T17:04:59.000Z
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_charts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='chartjsbarmodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AddField(
model_name='chartjsdoughnutmodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AddField(
model_name='chartjslinemodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AddField(
model_name='chartjspiemodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AddField(
model_name='chartjspolarmodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AddField(
model_name='chartjsradarmodel',
name='chart_position',
field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
),
migrations.AlterField(
model_name='chartjsbarmodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
migrations.AlterField(
model_name='chartjsdoughnutmodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
migrations.AlterField(
model_name='chartjslinemodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
migrations.AlterField(
model_name='chartjspiemodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
migrations.AlterField(
model_name='chartjspolarmodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
migrations.AlterField(
model_name='chartjsradarmodel',
name='legend_position',
field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
),
]
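# Note: the AddField/AlterField pairs above repeat the same two CharFields
# ('chart_position' added, 'legend_position' altered) once per chart model,
# because each migration operation targets a single model.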
| 36.973333
| 95
| 0.611973
| 256
| 2,773
| 6.433594
| 0.136719
| 0.065574
| 0.123862
| 0.204007
| 0.79235
| 0.79235
| 0.79235
| 0.780814
| 0.780814
| 0.780814
| 0
| 0.01995
| 0.276956
| 2,773
| 74
| 96
| 37.472973
| 0.801496
| 0
| 0
| 0.895522
| 0
| 0
| 0.207942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014925
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcc25bf61c176e60b03809db70f8c5e3f540ca19
| 99
|
py
|
Python
|
yesg/__init__.py
|
Lienus10/yesg
|
46e684745d03cc82651f2f0b110222e8440f429c
|
[
"MIT"
] | null | null | null |
yesg/__init__.py
|
Lienus10/yesg
|
46e684745d03cc82651f2f0b110222e8440f429c
|
[
"MIT"
] | null | null | null |
yesg/__init__.py
|
Lienus10/yesg
|
46e684745d03cc82651f2f0b110222e8440f429c
|
[
"MIT"
] | null | null | null |
from .main import get_esg_short
from .main import get_esg_full
from .main import get_historic_esg
| 19.8
| 34
| 0.838384
| 18
| 99
| 4.277778
| 0.444444
| 0.311688
| 0.545455
| 0.662338
| 0.519481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 99
| 4
| 35
| 24.75
| 0.895349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
1e21935ad05a3e22ab493070b022e1f59facb320
| 65
|
py
|
Python
|
digital/common/policies/__init__.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
digital/common/policies/__init__.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
digital/common/policies/__init__.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
import itertools
def list_rules():
return itertools.chain()
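# Usage sketch: nothing is chained yet, so the policy rule list is empty:
# assert list(list_rules()) == []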
| 13
| 28
| 0.738462
| 8
| 65
| 5.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169231
| 65
| 5
| 28
| 13
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
1e841dc895711459461cf19b50a2ec4264cab0f1
| 2,664
|
py
|
Python
|
index_server/containers/bgsplit_trainer/test.py
|
jeremyephron/forager
|
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
|
[
"MIT"
] | 1
|
2020-12-01T23:25:58.000Z
|
2020-12-01T23:25:58.000Z
|
index_server/containers/bgsplit_trainer/test.py
|
jeremyephron/forager
|
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
|
[
"MIT"
] | 2
|
2020-10-07T01:03:06.000Z
|
2020-10-12T19:08:55.000Z
|
index_server/containers/bgsplit_trainer/test.py
|
jeremyephron/forager
|
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
|
[
"MIT"
] | null | null | null |
from main import TrainingJob
import threading
import os.path
def main():
# Hold a lock for the duration of the job; it is handed to TrainingJob
# below as payload['_lock'].
working_lock = threading.Lock()
working_lock.acquire()
payload = {
'train_positive_paths': [
'waymo/train/1506904092688646_front.jpeg', 'waymo/train/1506904093682819_front.jpeg',
'waymo/train/1506904094676800_front.jpeg', 'waymo/train/1506904095672707_front.jpeg'],
'train_negative_paths': [
'waymo/train/1506904088695574_front.jpeg', 'waymo/train/1506904089697010_front.jpeg',
'waymo/train/1506904090696344_front.jpeg', 'waymo/train/1506904091693725_front.jpeg',
'waymo/train/1507239497145438_front.jpeg', 'waymo/train/1552675808778089_front.jpeg',
'waymo/train/1550004504651292_front.jpeg', 'waymo/train/1508086852953325_front.jpeg',
'waymo/train/1557962360312397_front.jpeg', 'waymo/train/1506906090680412_front.jpeg',
'waymo/train/1552660419799043_front.jpeg', 'waymo/train/1553701514387735_front.jpeg',
'waymo/train/1557546527922405_front.jpeg', 'waymo/train/1521941572115983_front.jpeg',
'waymo/train/1553552806285759_front.jpeg', 'waymo/train/1512860036529199_front.jpeg',
'waymo/train/1550192058374415_front.jpeg', 'waymo/train/1559178305737499_front.jpeg',
'waymo/train/1521998637758363_front.jpeg', 'waymo/train/1506959820627388_front.jpeg',
'waymo/train/1553904019686166_front.jpeg', 'waymo/train/1557335968649038_front.jpeg',
'waymo/train/1507253770103541_front.jpeg', 'waymo/train/1554139647204272_front.jpeg',
'waymo/train/1557335709555987_front.jpeg'],
'train_unlabeled_paths': [
'waymo/train/1553206988074568_front.jpeg', 'waymo/train/1546577006594417_front.jpeg',
'waymo/train/1554306446401663_front.jpeg'],
'val_positive_paths': [],
'val_negative_paths': [],
'val_unlabeled_paths': [],
'model_kwargs': {
'max_ram': 37580963840,
'aux_labels_path': 'https://storage.googleapis.com/foragerml/aux_labels/2d2b13f9-3b30-4e51-8ab9-4e8a03ba1f03/imagenet.pickle'},
'model_id': '2d7cda19-8732-4002-9a53-0a32b92dfb66',
'model_name': 'BGSPLIT',
'notify_url': 'http://34.82.7.82:5000/bgsplit_trainer_status'}
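# Prefix each relative path with the GCS bucket URL (os.path.join simply
# joins the two string segments with a slash here).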
payload['train_positive_paths'] = \
[os.path.join('https://storage.googleapis.com/foragerml', x)
for x in payload['train_positive_paths']]
payload['train_negative_paths'] = \
[os.path.join('https://storage.googleapis.com/foragerml', x)
for x in payload['train_negative_paths']]
payload['train_unlabeled_paths'] = \
[os.path.join('https://storage.googleapis.com/foragerml', x)
for x in payload['train_unlabeled_paths']]
payload['_lock'] = working_lock
payload['model_kwargs']['use_cuda'] = False
current_job = TrainingJob(**payload)
current_job.run()
if __name__ == "__main__":
main()
| 106.56
| 1,858
| 0.771396
| 321
| 2,664
| 6.146417
| 0.311526
| 0.16219
| 0.205778
| 0.27927
| 0.120628
| 0.103396
| 0.103396
| 0.103396
| 0.103396
| 0.103396
| 0
| 0.234432
| 0.077703
| 2,664
| 24
| 1,859
| 111
| 0.56858
| 0
| 0
| 0.136364
| 0
| 0.045455
| 0.710586
| 0.505631
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1eb441e0e81b46594ab507e5c2b1c179c6992ef9
| 41
|
py
|
Python
|
bot/handlers/users/__init__.py
|
famaxth/Russian-Qiwi-Bot
|
d5b0f23516343205ca7bad15b2d2fae7b675f584
|
[
"MIT"
] | null | null | null |
bot/handlers/users/__init__.py
|
famaxth/Russian-Qiwi-Bot
|
d5b0f23516343205ca7bad15b2d2fae7b675f584
|
[
"MIT"
] | null | null | null |
bot/handlers/users/__init__.py
|
famaxth/Russian-Qiwi-Bot
|
d5b0f23516343205ca7bad15b2d2fae7b675f584
|
[
"MIT"
] | null | null | null |
from . import start
from . import main
| 13.666667
| 20
| 0.707317
| 6
| 41
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243902
| 41
| 2
| 21
| 20.5
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1ebe07cee0735a77fe20dd6ba2303f3cd5efec9e
| 76,053
|
py
|
Python
|
src/deep_noise_to_image_models.py
|
furgerf/GAN-for-dermatologic-imaging
|
e90b06c46c7693e984a4c5b067e18460113cd23b
|
[
"Apache-2.0"
] | null | null | null |
src/deep_noise_to_image_models.py
|
furgerf/GAN-for-dermatologic-imaging
|
e90b06c46c7693e984a4c5b067e18460113cd23b
|
[
"Apache-2.0"
] | 9
|
2020-09-26T01:22:00.000Z
|
2022-01-22T18:00:52.000Z
|
src/deep_noise_to_image_models.py
|
furgerf/GAN-for-dermatologic-imaging
|
e90b06c46c7693e984a4c5b067e18460113cd23b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=too-many-locals,arguments-differ,unused-import
import tensorflow as tf
from tensorflow.keras.layers import (BatchNormalization, Dense, Dropout,
Flatten, MaxPooling2D, SpatialDropout2D,
add)
from tensorflow.nn import leaky_relu, relu, tanh
from deep_model_blocks import (BottleneckResidualBlock, Conv, ConvBlock,
Deconv, DeconvBlock, ResidualBlock, ResizeBlock,
ReverseBottleneckResidualBlock,
ReverseResidualBlock, UBlock)
from model import Model
class Deep480pNoise(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoise.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoise.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
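# Usage sketch (hypothetical `config`; the two fields below are inferred from
# the attribute accesses above, not from a documented interface):
#   config.has_colored_target     -> 3 output channels if True, else 1
#   config.discriminator_classes  -> width of the discriminator's final Dense
#   g = Deep480pNoise.Generator(config)
#   d = Deep480pNoise.Discriminator(config)
#   images = g(noise)   # five stride-2 deconvs map the 15x20 seed to 480x640
#   logits = d(images)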
class Deep480pNoiseFancyFilters(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseFancyFilters.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 3, 2),
# ConvBlock(initial_filters*16, 3, 1),
DeconvBlock(initial_filters*16, 3, 2),
ConvBlock(initial_filters*8, 3, 1),
DeconvBlock(initial_filters*8, 7, 2),
ConvBlock(initial_filters*4, 7, 1),
DeconvBlock(initial_filters*4, 7, 2),
ConvBlock(initial_filters*2, 7, 1),
DeconvBlock(initial_filters*2, 7, 2),
# ConvBlock(initial_filters*1, 7, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseFancyFilters.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseThreeSteps(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseThreeSteps.Generator, self).__init__()
initial_filters = 32
self.fc_shape = (60, 80, 16)
self.fc = tf.keras.layers.Dense(self.fc_shape[0]*self.fc_shape[1]*self.fc_shape[2], use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*4, 7, 2),
ConvBlock(initial_filters*2, 3, 1),
ConvBlock(initial_filters*2, 3, 1),
DeconvBlock(initial_filters*2, 7, 2),
ConvBlock(initial_filters*1, 3, 1),
ConvBlock(initial_filters*1, 3, 1),
DeconvBlock(initial_filters*1, 7, 2),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, self.fc_shape[0], self.fc_shape[1], self.fc_shape[2]))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseThreeSteps.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseSmallerGenLayer(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseSmallerGenLayer.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*8, 5, 1),
# ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseSmallerGenLayer.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseSmallerGenLayerFancyFilters(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseSmallerGenLayerFancyFilters.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 7, 2),
# ConvBlock(initial_filters*8, 3, 1),
# ConvBlock(initial_filters*8, 3, 1),
DeconvBlock(initial_filters*16, 7, 2),
ConvBlock(initial_filters*4, 3, 1),
ConvBlock(initial_filters*4, 3, 1),
DeconvBlock(initial_filters*8, 7, 2),
ConvBlock(initial_filters*2, 3, 1),
ConvBlock(initial_filters*2, 3, 1),
DeconvBlock(initial_filters*4, 7, 2),
ConvBlock(initial_filters*1, 3, 1),
ConvBlock(initial_filters*1, 3, 1),
DeconvBlock(initial_filters*2, 7, 2),
# ConvBlock(initial_filters*1, 3, 1),
# ConvBlock(initial_filters*1, 3, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseSmallerGenLayerFancyFilters.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseNoDeconv(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseNoDeconv.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
ResizeBlock((30, 40), initial_filters*32, 5),
# ConvBlock(initial_filters*16, 5, 1),
ResizeBlock((60, 80), initial_filters*16, 5),
ConvBlock(initial_filters*8, 5, 1),
ResizeBlock((120, 160), initial_filters*8, 5),
ConvBlock(initial_filters*4, 5, 1),
ResizeBlock((240, 320), initial_filters*4, 5),
ConvBlock(initial_filters*2, 5, 1),
ResizeBlock((480, 640), initial_filters*2, 5),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseNoDeconv.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 4, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseResidual(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseResidual.Generator, self).__init__()
initial_filters = int(512/32)//2
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
ReverseResidualBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*2*16, 5, 1),
ReverseResidualBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*2*8, 5, 1),
ReverseResidualBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*2*4, 5, 1),
ReverseResidualBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2*2, 5, 1),
ReverseResidualBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*2*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseResidual.Discriminator, self).__init__()
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep480pNoiseMultiscaleDisc(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMultiscaleDisc.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
# default
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
# # more filters in deconv
# DeconvBlock(initial_filters*32*2, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
# DeconvBlock(initial_filters*16*2, 5, 2),
# ConvBlock(initial_filters*8, 5, 1),
# DeconvBlock(initial_filters*8*2, 5, 2),
# ConvBlock(initial_filters*4, 5, 1),
# DeconvBlock(initial_filters*4*2, 5, 2),
# ConvBlock(initial_filters*2, 5, 1),
# DeconvBlock(initial_filters*2, 5, 2),
# # ConvBlock(initial_filters*1, 5, 1),
# # more filters in conv
# DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16*2, 5, 1),
# DeconvBlock(initial_filters*16, 5, 2),
# ConvBlock(initial_filters*8*2, 5, 1),
# DeconvBlock(initial_filters*8, 5, 2),
# ConvBlock(initial_filters*4*2, 5, 1),
# DeconvBlock(initial_filters*4, 5, 2),
# ConvBlock(initial_filters*2*2, 5, 1),
# DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
# resize_nearest_neighbor takes (new_height, new_width); size_x/size_y are width/height here
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2
self.blocks = [
# default
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*1, 4, 1),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*2, 4, 1),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*4, 4, 1),
ConvBlock(initial_filters*16, 4, 2),
ConvBlock(initial_filters*8, 4, 1),
# NOTE: keep track of image resizing+conv!
ConvBlock(initial_filters*32, 4, 2),
ConvBlock(initial_filters*16, 4, 1),
# # more filters in unstrided
# ConvBlock(initial_filters*2, 4, 2),
# ConvBlock(initial_filters*1*2, 5, 1),
# ConvBlock(initial_filters*4, 4, 2),
# ConvBlock(initial_filters*2*2, 5, 1),
# ConvBlock(initial_filters*8, 4, 2),
# ConvBlock(initial_filters*4*2, 5, 1),
# # NOTE: keep track of image resizing+conv!
# ConvBlock(initial_filters*16, 4, 2),
# ConvBlock(initial_filters*8*2, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMultiscaleDisc.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
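# Design note: the discriminator wraps two MultiscaleDisc instances (scaling
# factors 1 and 0.5 above), scores the input at both resolutions, and averages
# the concatenated per-scale logits via tf.reduce_mean.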
class Deep480pNoiseMultiscaleDiscGenLarge(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMultiscaleDiscGenLarge.Generator, self).__init__()
initial_filters = int(512/32/2)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*1, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
x = tanh(self.final_conv(x))
return tf.image.resize_nearest_neighbor(x, (480, 640))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
# resize_nearest_neighbor takes (new_height, new_width); size_x/size_y are width/height here
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2
self.blocks = [
# default
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*4, 5, 1),
# NOTE: keep track of image resizing+conv!
ConvBlock(initial_filters*16, 4, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMultiscaleDiscGenLarge.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMultiscaleDiscShallow(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMultiscaleDiscShallow.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
DeconvBlock(initial_filters*16, 5, 2),
DeconvBlock(initial_filters*8, 5, 2),
DeconvBlock(initial_filters*4, 5, 2),
DeconvBlock(initial_filters*2, 5, 2),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMultiscaleDiscShallow.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
self.scaling_factor = scaling_factor
self.resize = None
initial_filters = 32//2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*32, 5, 2),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
if self.resize is None:
if self.scaling_factor != 1:
size_x = int(x.shape[1].value * self.scaling_factor)  # shape[1] is the image height
size_y = int(x.shape[2].value * self.scaling_factor)  # shape[2] is the image width
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMultiscaleDiscShallow.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMultiscaleDiscShallow.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMultiscaleDiscShallow.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
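# Design note: unlike the earlier multiscale discriminators, this variant
# defers building `self.resize` until the first call, deriving the target
# size from the incoming tensor's static shape instead of hard-coded
# 640x480 constants.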
class Deep480pNoiseResizeMultiscaleDiscShallow(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseResizeMultiscaleDiscShallow.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
# ResizeBlock((30, 40), initial_filters*32, 5),
# ResizeBlock((60, 80), initial_filters*16, 5),
# ResizeBlock((120, 160), initial_filters*8, 5),
# ResizeBlock((240, 320), initial_filters*4, 5),
# ResizeBlock((480, 640), initial_filters*2, 5),
DeconvBlock(initial_filters*32, 5, 2),
DeconvBlock(initial_filters*16, 5, 2),
DeconvBlock(initial_filters*8, 5, 2),
DeconvBlock(initial_filters*4, 5, 2),
DeconvBlock(initial_filters*2, 5, 2),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
self.scaling_factor = scaling_factor
self.resize = None
initial_filters = 32//2
self.blocks = [
# resize is on smaller resolution so that it fits in memory...
ResizeBlock((240, 320), initial_filters*2, 5),
ResizeBlock((120, 160), initial_filters*4, 5),
ResizeBlock((60, 80), initial_filters*8, 5),
ResizeBlock((30, 40), initial_filters*16, 5),
ResizeBlock((15, 20), initial_filters*32, 5),
# ConvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*4, 5, 2),
# ConvBlock(initial_filters*8, 5, 2),
# ConvBlock(initial_filters*16, 5, 2),
# ConvBlock(initial_filters*32, 5, 2),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
if self.resize is None:
if self.scaling_factor != 1:
size_x = int(x.shape[1].value * self.scaling_factor)  # shape[1] is the image height
size_y = int(x.shape[2].value * self.scaling_factor)  # shape[2] is the image width
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep60pNoise(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep60pNoise.Generator, self).__init__()
initial_filters = int(512/32) * 4
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep60pNoise.Discriminator, self).__init__()
initial_filters = 32 * 4
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
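# Naming note: the "<N>p" suffix tracks the generator's output height; each
# stride-2 deconv block doubles the 15x20 seed, so two blocks give 60x80 here,
# three give 120x160, four 240x320, and five 480x640.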
class Deep60pNoiseDeeper(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep60pNoiseDeeper.Generator, self).__init__()
initial_filters = int(512/32) * 2
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
ConvBlock(initial_filters*32, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
ConvBlock(initial_filters*8, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep60pNoiseDeeper.Discriminator, self).__init__()
initial_filters = 32 * 2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep120pNoise(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoise.Generator, self).__init__()
initial_filters = int(512/32) * 4
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoise.Discriminator, self).__init__()
initial_filters = 32 * 4
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep120pNoiseMultiscaleDisc(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoiseMultiscaleDisc.Generator, self).__init__()
initial_filters = int(512/32) * 4
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep120pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(160 * scaling_factor)
size_y = int(120 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
# resize_nearest_neighbor takes (new_height, new_width); size_x/size_y are width/height here
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//1 * 4
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep120pNoiseMultiscaleDisc.Discriminator, self).__init__()
self.discriminators = [Deep120pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep120pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep120pNoiseDeeper(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoiseDeeper.Generator, self).__init__()
initial_filters = int(512/32) * 2
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoiseDeeper.Discriminator, self).__init__()
initial_filters = 32 * 2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep120pNoiseShallowGenMultiscaleDisc(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep120pNoiseShallowGenMultiscaleDisc.Generator, self).__init__()
initial_filters = 1024
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters, 5, 2),
DeconvBlock(initial_filters, 5, 2),
DeconvBlock(initial_filters, 5, 2),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(160 * scaling_factor)
size_y = int(120 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
# resize_nearest_neighbor takes (new_height, new_width); size_x/size_y are width/height here
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//1 * 4
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator, self).__init__()
self.discriminators = [Deep120pNoiseShallowGenMultiscaleDisc.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep240pNoise(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep240pNoise.Generator, self).__init__()
initial_filters = int(512/32) * 2
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep240pNoise.Discriminator, self).__init__()
initial_filters = 32 * 2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
class Deep240pNoiseMultiscaleDisc(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep240pNoiseMultiscaleDisc.Generator, self).__init__()
initial_filters = int(512/32) * 2
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*2, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep240pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(320 * scaling_factor)
size_y = int(240 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
# resize_nearest_neighbor takes (new_height, new_width); size_x/size_y are width/height here
self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2 * 2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
# ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
# ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
# ConvBlock(initial_filters*8, 5, 1),
# NOTE: keep track of image resizing+conv!
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
# ConvBlock(initial_filters*16, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep240pNoiseMultiscaleDisc.Discriminator, self).__init__()
self.discriminators = [Deep240pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
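            # Concatenate the class logits from the full- and half-resolution
            # critics and average them, so the generator must fool both scales.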
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep240pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2S1(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1.Generator, self).__init__()
initial_filters = int(512/32)*1
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
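            # Five stride-2 DeconvBlocks double 15x20 five times:
            # 15x20 -> 30x40 -> 60x80 -> 120x160 -> 240x320 -> 480x640.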
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMsDiscS2S1.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))  # size is (height, width)
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2*1
self.blocks = [
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*1, 4, 1),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*2, 4, 1),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*4, 4, 1),
ConvBlock(initial_filters*16, 4, 2),
ConvBlock(initial_filters*8, 4, 1),
ConvBlock(initial_filters*32, 4, 2),
ConvBlock(initial_filters*16, 4, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMsDiscS2S1.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMsDiscS2S1.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMsDiscS2.Generator, self).__init__()
initial_filters = int(512/32)*1
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMsDiscS2.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))  # size is (height, width)
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2*1
self.blocks = [
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*16, 4, 2),
ConvBlock(initial_filters*32, 4, 2),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMsDiscS2.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMsDiscS2.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMsDiscS2.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2S1Shared(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1Shared.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMsDiscS2S1Shared.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))  # size is (height, width)
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2//2
self.s2_blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*16, 5, 2),
# ConvBlock(initial_filters*32, 5, 2),
]
self.s1_blocks = [
ConvBlock(initial_filters*2, 5, 1),
ConvBlock(initial_filters*4, 5, 1),
ConvBlock(initial_filters*8, 5, 1),
ConvBlock(initial_filters*16, 5, 1),
# ConvBlock(initial_filters*32, 5, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.s2_fc = Dense(config.discriminator_classes, use_bias=False)
self.s2s1_fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
s2 = x
for block in self.s2_blocks:
s2 = block(s2, training=training)
s2 = self.dropout(s2, training=training)
s2 = self.flatten(s2)
s2 = self.s2_fc(s2)
s2s1 = x
for i in range(len(self.s1_blocks)):
s2s1 = self.s2_blocks[i](s2s1, training=training)
s2s1 = self.dropout(s2s1, training=training)
s2s1 = self.s1_blocks[i](s2s1, training=training)
s2s1 = self.dropout(s2s1, training=training)
s2s1 = self.flatten(s2s1)
s2s1 = self.s2s1_fc(s2s1)
return tf.reduce_mean(tf.concat([s2, s2s1], axis=-1), axis=-1, keepdims=True)
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1Shared.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMsDiscS2S1Shared.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMsDiscS2S1Shared.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseS2S1Shared(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseS2S1Shared.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseS2S1Shared.Discriminator, self).__init__()
initial_filters = 32//2
self.s2_blocks = [
ConvBlock(initial_filters*2, 4, 2),
ConvBlock(initial_filters*4, 4, 2),
ConvBlock(initial_filters*8, 4, 2),
ConvBlock(initial_filters*16, 4, 2),
# ConvBlock(initial_filters*32, 4, 2),
]
self.s1_blocks = [
ConvBlock(initial_filters*2, 4, 1),
ConvBlock(initial_filters*4, 4, 1),
ConvBlock(initial_filters*8, 4, 1),
ConvBlock(initial_filters*16, 4, 1),
# ConvBlock(initial_filters*32, 4, 1),
]
self.dropout = Dropout(0.3)
self.flatten = Flatten()
self.s2_fc = Dense(config.discriminator_classes, use_bias=False)
self.s2s1_fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
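            # Two heads share the stride-2 conv weights: `s2` applies them alone,
            # while `s2s1` interleaves the same stride-2 blocks with extra
            # stride-1 blocks; the two verdicts are averaged into one score below.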
s2 = x
for block in self.s2_blocks:
s2 = block(s2, training=training)
s2 = self.dropout(s2, training=training)
s2 = self.flatten(s2)
s2 = self.s2_fc(s2)
s2s1 = x
for i in range(len(self.s1_blocks)):
s2s1 = self.s2_blocks[i](s2s1, training=training)
s2s1 = self.dropout(s2s1, training=training)
s2s1 = self.s1_blocks[i](s2s1, training=training)
s2s1 = self.dropout(s2s1, training=training)
s2s1 = self.flatten(s2s1)
s2s1 = self.s2s1_fc(s2s1)
return tf.reduce_mean(tf.concat([s2, s2s1], axis=-1), axis=-1, keepdims=True)
class Deep480pNoiseMsDiscS2S1Modified(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1Modified.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMsDiscS2S1Modified.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))  # size is (height, width)
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2
self.blocks = [
ConvBlock(initial_filters*2, 7, 2),
ConvBlock(initial_filters*2, 7, 1),
ConvBlock(initial_filters*4, 7, 2),
ConvBlock(initial_filters*4, 7, 1),
ConvBlock(initial_filters*8, 7, 2),
ConvBlock(initial_filters*8, 7, 1),
ConvBlock(initial_filters*16, 7, 2),
ConvBlock(initial_filters*16, 7, 1),
# ConvBlock(initial_filters*32, 7, 2),
# ConvBlock(initial_filters*16, 7, 1),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMsDiscS2S1Modified.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMsDiscS2S1Modified.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMsDiscS2S1Modified.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
class Deep480pNoisePatch(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoisePatch.Generator, self).__init__()
initial_filters = int(512/32)
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
DeconvBlock(initial_filters*32, 5, 2),
# ConvBlock(initial_filters*16, 5, 1),
DeconvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*8, 5, 1),
DeconvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*4, 5, 1),
DeconvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*2, 5, 1),
DeconvBlock(initial_filters*2, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoisePatch.Discriminator, self).__init__()
del config
initial_filters = 32
self.blocks = [
ConvBlock(initial_filters*2, 4, 2),
# ConvBlock(initial_filters*1, 4, 1),
ConvBlock(initial_filters*4, 4, 2),
# ConvBlock(initial_filters*2, 4, 1),
ConvBlock(initial_filters*8, 4, 2),
# ConvBlock(initial_filters*4, 4, 1),
ConvBlock(initial_filters*16, 4, 2),
# ConvBlock(initial_filters*8, 4, 1),
]
self.dropout = Dropout(0.3)
self.final_conv = Conv(1, 4, 1)
def call(self, x, training=True):
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
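            # PatchGAN-style head: final_conv emits one logit per spatial patch;
            # averaging over the spatial axes (1, 2) collapses the patch map into
            # a single score per image.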
x = self.final_conv(x)
x = tf.reduce_mean(x, axis=[1, 2])
return x
class Deep480pNoiseMsDiscS2EvenG(Model):
class Generator(tf.keras.Model):
def __init__(self, config):
super(Deep480pNoiseMsDiscS2EvenG.Generator, self).__init__()
initial_filters = 64
self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
self.initial_norm = tf.keras.layers.BatchNormalization()
self.blocks = [
# default
DeconvBlock(initial_filters*1, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*1, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*1, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*1, 5, 2),
ConvBlock(initial_filters*1, 5, 1),
DeconvBlock(initial_filters*1, 5, 2),
# ConvBlock(initial_filters*1, 5, 1),
]
self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
def call(self, x, training=True):
x = self.fc(x)
x = self.initial_norm(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 15, 20, 64))
for block in self.blocks:
x = block(x, training=training)
return tanh(self.final_conv(x))
class Discriminator(tf.keras.Model):
class MultiscaleDisc(tf.keras.Model):
def __init__(self, config, scaling_factor, dropout):
super(Deep480pNoiseMsDiscS2EvenG.Discriminator.MultiscaleDisc, self).__init__()
assert scaling_factor > 0
if scaling_factor != 1:
size_x = int(640 * scaling_factor)
size_y = int(480 * scaling_factor)
tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_y, size_x))  # size is (height, width)
else:
tf.logging.info("Multiscale discriminator operating on regular resolution")
self.resize = lambda x: x
initial_filters = 32//2
self.blocks = [
ConvBlock(initial_filters*2, 5, 2),
ConvBlock(initial_filters*4, 5, 2),
ConvBlock(initial_filters*8, 5, 2),
ConvBlock(initial_filters*16, 5, 2),
ConvBlock(initial_filters*32, 5, 2),
]
self.dropout = dropout
self.flatten = Flatten()
self.fc = Dense(config.discriminator_classes, use_bias=False)
def call(self, x, training):
x = self.resize(x)
for block in self.blocks:
x = block(x, training=training)
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc(x)
return x
def __init__(self, config):
super(Deep480pNoiseMsDiscS2EvenG.Discriminator, self).__init__()
self.discriminators = [Deep480pNoiseMsDiscS2EvenG.Discriminator.MultiscaleDisc(
config, factor, Dropout(0.3)) for factor in [1, 0.5]]
def call(self, x, training=True):
return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
def summary(self, line_length=None, positions=None, print_fn=None):
super(Deep480pNoiseMsDiscS2EvenG.Discriminator, self).summary(line_length, positions, print_fn)
print_fn("\nDetails:")
for discriminator in self.discriminators:
discriminator.summary(line_length, positions, print_fn)
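# Hedged training-step sketch (not from this file): eager-mode GradientTape with
# the standard non-saturating GAN loss. Optimizer setup, label convention and the
# 512-dim latent are assumptions; works with any (Generator, Discriminator) pair
# defined above.
def _gan_train_step(gen, disc, gen_opt, disc_opt, real_images, latent_dim=512):
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    z = tf.random.normal((tf.shape(real_images)[0], latent_dim))
    with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
        fake = gen(z, training=True)
        real_logits = disc(real_images, training=True)
        fake_logits = disc(fake, training=True)
        # Discriminator: real -> 1, fake -> 0; generator wants fake -> 1.
        d_loss = (bce(tf.ones_like(real_logits), real_logits)
                  + bce(tf.zeros_like(fake_logits), fake_logits))
        g_loss = bce(tf.ones_like(fake_logits), fake_logits)
    gen_opt.apply_gradients(zip(g_tape.gradient(g_loss, gen.trainable_variables),
                                gen.trainable_variables))
    disc_opt.apply_gradients(zip(d_tape.gradient(d_loss, disc.trainable_variables),
                                 disc.trainable_variables))
    return g_loss, d_loss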
94d7e7537190b59682f2beda221bebc1a5c11576 | 34,850 | py | Python | safenet/mutableData.py | DuncanKushnir/pySafe | b47c234f5333566027c8981054747d2587a673fe | [ "MIT" ]
import safenet.safeUtils as safeUtils
import queue

# NOTE: this module also relies on a cffi `ffi` instance (used by ffi.new and
# ffi.callback below); it is assumed to be provided elsewhere in the package.
appQueue = queue.Queue()
class lib:
    def __init__(self, authlib, applib):
        self.safe_authenticator = authlib
        self.safe_app = applib


# first attempt at defining mutable data for us
class mutableData:
    def __init__(self, authlib, applib, fromBytes=None):
        self.lib = lib(authlib, applib)
        self.safeUtils = safeUtils  # kept as an attribute so the callbacks below can reach it
        # defining the mutableData
        if fromBytes:
            self.asBytes = fromBytes
            # self.ffiMutable = ffi.new('MDataInfo *') - the ffi datatypes should only exist
            # locally in our functions; otherwise we can't pickle our own class (so far,
            # pickling always failed when ffi objects were held as attributes)
        else:
            self.asBytes = None

    def getffiMutable(self):
        # Rebuild the MDataInfo struct from its byte representation on demand
        # (`ffi` is assumed to be the package's cffi instance, see note above).
        ffiMutable = ffi.new('MDataInfo *')
        writeBuffer = ffi.buffer(ffiMutable)
        writeBuffer[:] = self.asBytes
        return ffiMutable
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_encode_metadata(self, metadata, user_data, o_cb=None):
"""
MetadataResponse*, [any], [function], [custom ffi lib]
MetadataResponse* metadata, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* encoded, uintptr_t encoded_len)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t)")
def _mdata_encode_metadata_o_cb(user_data ,result ,encoded ,encoded_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,encoded ,encoded_len)
self.lib.safe_app.mdata_encode_metadata(metadata, user_data, _mdata_encode_metadata_o_cb)
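    # All wrappers below share one pattern: @safeUtils.safeThread runs the call on
    # a worker thread (timeout 5 s, synchronised via appQueue), while the inner
    # @ffi.callback bridge checks the FfiResult, signals completion with
    # appQueue.put('gotResult'), and forwards the raw C arguments to the optional
    # Python callback `o_cb`.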
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_new_private(self, name, type_tag, secret_key, nonce, user_data, o_cb=None):
"""
XorNameArray*, uint64_t, SymSecretKey*, SymNonce*, [any], [function], [custom ffi lib]
XorNameArray* name, uint64_t type_tag, SymSecretKey* secret_key, SymNonce* nonce, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataInfo* mdata_info)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataInfo*)")
def _mdata_info_new_private_o_cb(user_data ,result ,mdata_info):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,mdata_info)
self.lib.safe_app.mdata_info_new_private(name, type_tag, secret_key, nonce, user_data, _mdata_info_new_private_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_random_public(self, type_tag, user_data, o_cb=None):
"""
uint64_t, [any], [function], [custom ffi lib]
uint64_t type_tag, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataInfo* mdata_info)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataInfo*)")
def _mdata_info_random_public_o_cb(user_data ,result ,mdata_info):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,mdata_info)
self.lib.safe_app.mdata_info_random_public(type_tag, user_data, _mdata_info_random_public_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_random_private(self, type_tag, user_data, o_cb=None):
"""
uint64_t, [any], [function], [custom ffi lib]
uint64_t type_tag, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataInfo* mdata_info)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataInfo*)")
def _mdata_info_random_private_o_cb(user_data ,result ,mdata_info):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,mdata_info)
self.lib.safe_app.mdata_info_random_private(type_tag, user_data, _mdata_info_random_private_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_encrypt_entry_key(self, info, input, input_len, user_data, o_cb=None):
"""
MDataInfo*, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
MDataInfo* info, uint8_t* input, uintptr_t input_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* enc_entry_key, uintptr_t enc_entry_key_len)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t)")
def _mdata_info_encrypt_entry_key_o_cb(user_data ,result ,enc_entry_key ,enc_entry_key_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,enc_entry_key ,enc_entry_key_len)
self.lib.safe_app.mdata_info_encrypt_entry_key(info, input, input_len, user_data, _mdata_info_encrypt_entry_key_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_encrypt_entry_value(self, info, input, input_len, user_data, o_cb=None):
"""
MDataInfo*, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
MDataInfo* info, uint8_t* input, uintptr_t input_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* enc_entry_value, uintptr_t enc_entry_value_len)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t)")
def _mdata_info_encrypt_entry_value_o_cb(user_data ,result ,enc_entry_value ,enc_entry_value_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,enc_entry_value ,enc_entry_value_len)
self.lib.safe_app.mdata_info_encrypt_entry_value(info, input, input_len, user_data, _mdata_info_encrypt_entry_value_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_decrypt(self, info, input, input_len, user_data, o_cb=None):
"""
MDataInfo*, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
MDataInfo* info, uint8_t* input, uintptr_t input_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* mdata_info_decrypt, uintptr_t mdata_info_decrypt_len)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t)")
def _mdata_info_decrypt_o_cb(user_data ,result ,mdata_info_decrypt ,mdata_info_decrypt_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,mdata_info_decrypt ,mdata_info_decrypt_len)
self.lib.safe_app.mdata_info_decrypt(info, input, input_len, user_data, _mdata_info_decrypt_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_serialise(self, info, user_data, o_cb=None):
"""
MDataInfo*, [any], [function], [custom ffi lib]
MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* encoded, uintptr_t encoded_len)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t)")
def _mdata_info_serialise_o_cb(user_data ,result ,encoded ,encoded_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,encoded ,encoded_len)
self.lib.safe_app.mdata_info_serialise(info, user_data, _mdata_info_serialise_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_info_deserialise(self, encoded_ptr, encoded_len, user_data, o_cb=None):
"""
uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
uint8_t* encoded_ptr, uintptr_t encoded_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataInfo* mdata_info)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataInfo*)")
def _mdata_info_deserialise_o_cb(user_data ,result ,mdata_info):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,mdata_info)
self.lib.safe_app.mdata_info_deserialise(encoded_ptr, encoded_len, user_data, _mdata_info_deserialise_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _encode_share_mdata_req(self, req, user_data, o_cb=None):
"""
ShareMDataReq*, [any], [function], [custom ffi lib]
ShareMDataReq* req, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint32_t req_id, char* encoded)
"""
@ffi.callback("void(void* ,FfiResult* ,uint32_t ,char*)")
def _encode_share_mdata_req_o_cb(user_data ,result ,req_id ,encoded):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,req_id ,encoded)
self.lib.safe_app.encode_share_mdata_req(req, user_data, _encode_share_mdata_req_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_put(self, app, info, permissions_h, entries_h, user_data, o_cb=None):
"""
App*, MDataInfo*, MDataPermissionsHandle, MDataEntriesHandle, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, MDataPermissionsHandle permissions_h, MDataEntriesHandle entries_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_put_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_put(app, info, permissions_h, entries_h, user_data, _mdata_put_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_get_version(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint64_t version)
"""
@ffi.callback("void(void* ,FfiResult* ,uint64_t)")
def _mdata_get_version_o_cb(user_data ,result ,version):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,version)
self.lib.safe_app.mdata_get_version(app, info, user_data, _mdata_get_version_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_serialised_size(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint64_t serialised_size)
"""
@ffi.callback("void(void* ,FfiResult* ,uint64_t)")
def _mdata_serialised_size_o_cb(user_data ,result ,serialised_size):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,serialised_size)
self.lib.safe_app.mdata_serialised_size(app, info, user_data, _mdata_serialised_size_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_get_value(self, app, info, key, key_len, user_data, o_cb=None):
"""
App*, MDataInfo*, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, uint8_t* key, uintptr_t key_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* content, uintptr_t content_len, uint64_t version)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t ,uint64_t)")
def _mdata_get_value_o_cb(user_data ,result ,content ,content_len ,version):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,content ,content_len ,version)
self.lib.safe_app.mdata_get_value(app, info, key, key_len, user_data, _mdata_get_value_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataEntriesHandle entries_h)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataEntriesHandle)")
def _mdata_entries_o_cb(user_data ,result ,entries_h):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,entries_h)
self.lib.safe_app.mdata_entries(app, info, user_data, _mdata_entries_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_keys(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataKey* keys, uintptr_t keys_len)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataKey* ,uintptr_t)")
def _mdata_list_keys_o_cb(user_data ,result ,keys ,keys_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,keys ,keys_len)
self.lib.safe_app.mdata_list_keys(app, info, user_data, _mdata_list_keys_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_values(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataValue* values, uintptr_t values_len)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataValue* ,uintptr_t)")
def _mdata_list_values_o_cb(user_data ,result ,values ,values_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,values ,values_len)
self.lib.safe_app.mdata_list_values(app, info, user_data, _mdata_list_values_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_mutate_entries(self, app, info, actions_h, user_data, o_cb=None):
"""
App*, MDataInfo*, MDataEntryActionsHandle, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, MDataEntryActionsHandle actions_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_mutate_entries_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_mutate_entries(app, info, actions_h, user_data, _mdata_mutate_entries_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_permissions(self, app, info, user_data, o_cb=None):
"""
App*, MDataInfo*, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataPermissionsHandle perm_h)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataPermissionsHandle)")
def _mdata_list_permissions_o_cb(user_data ,result ,perm_h):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,perm_h)
self.lib.safe_app.mdata_list_permissions(app, info, user_data, _mdata_list_permissions_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_user_permissions(self, app, info, user_h, user_data, o_cb=None):
"""
App*, MDataInfo*, SignPubKeyHandle, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, SignPubKeyHandle user_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, PermissionSet* perm_set)
"""
@ffi.callback("void(void* ,FfiResult* ,PermissionSet*)")
def _mdata_list_user_permissions_o_cb(user_data ,result ,perm_set):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,perm_set)
self.lib.safe_app.mdata_list_user_permissions(app, info, user_h, user_data, _mdata_list_user_permissions_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_set_user_permissions(self, app, info, user_h, permission_set, version, user_data, o_cb=None):
"""
App*, MDataInfo*, SignPubKeyHandle, PermissionSet*, uint64_t, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, SignPubKeyHandle user_h, PermissionSet* permission_set, uint64_t version, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_set_user_permissions_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_set_user_permissions(app, info, user_h, permission_set, version, user_data, _mdata_set_user_permissions_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_del_user_permissions(self, app, info, user_h, version, user_data, o_cb=None):
"""
App*, MDataInfo*, SignPubKeyHandle, uint64_t, [any], [function], [custom ffi lib]
App* app, MDataInfo* info, SignPubKeyHandle user_h, uint64_t version, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_del_user_permissions_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_del_user_permissions(app, info, user_h, version, user_data, _mdata_del_user_permissions_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_permissions_new(self, app, user_data, o_cb=None):
"""
App*, [any], [function], [custom ffi lib]
App* app, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataPermissionsHandle perm_h)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataPermissionsHandle)")
def _mdata_permissions_new_o_cb(user_data ,result ,perm_h):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,perm_h)
self.lib.safe_app.mdata_permissions_new(app, user_data, _mdata_permissions_new_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_permissions_len(self, app, permissions_h, user_data, o_cb=None):
"""
App*, MDataPermissionsHandle, [any], [function], [custom ffi lib]
App* app, MDataPermissionsHandle permissions_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uintptr_t size)
"""
@ffi.callback("void(void* ,FfiResult* ,uintptr_t)")
def _mdata_permissions_len_o_cb(user_data ,result ,size):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,size)
self.lib.safe_app.mdata_permissions_len(app, permissions_h, user_data, _mdata_permissions_len_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_permissions_get(self, app, permissions_h, user_h, user_data, o_cb=None):
"""
App*, MDataPermissionsHandle, SignPubKeyHandle, [any], [function], [custom ffi lib]
App* app, MDataPermissionsHandle permissions_h, SignPubKeyHandle user_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, PermissionSet* perm_set)
"""
@ffi.callback("void(void* ,FfiResult* ,PermissionSet*)")
def _mdata_permissions_get_o_cb(user_data ,result ,perm_set):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,perm_set)
self.lib.safe_app.mdata_permissions_get(app, permissions_h, user_h, user_data, _mdata_permissions_get_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_permission_sets(self, app, permissions_h, user_data, o_cb=None):
"""
App*, MDataPermissionsHandle, [any], [function], [custom ffi lib]
App* app, MDataPermissionsHandle permissions_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, UserPermissionSet* user_perm_sets, uintptr_t user_perm_sets_len)
"""
@ffi.callback("void(void* ,FfiResult* ,UserPermissionSet* ,uintptr_t)")
def _mdata_list_permission_sets_o_cb(user_data ,result ,user_perm_sets ,user_perm_sets_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,user_perm_sets ,user_perm_sets_len)
self.lib.safe_app.mdata_list_permission_sets(app, permissions_h, user_data, _mdata_list_permission_sets_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_permissions_insert(self, app, permissions_h, user_h, permission_set, user_data, o_cb=None):
"""
App*, MDataPermissionsHandle, SignPubKeyHandle, PermissionSet*, [any], [function], [custom ffi lib]
App* app, MDataPermissionsHandle permissions_h, SignPubKeyHandle user_h, PermissionSet* permission_set, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_permissions_insert_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_permissions_insert(app, permissions_h, user_h, permission_set, user_data, _mdata_permissions_insert_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_permissions_free(self, app, permissions_h, user_data, o_cb=None):
"""
App*, MDataPermissionsHandle, [any], [function], [custom ffi lib]
App* app, MDataPermissionsHandle permissions_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_permissions_free_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_permissions_free(app, permissions_h, user_data, _mdata_permissions_free_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entry_actions_new(self, app, user_data, o_cb=None):
"""
App*, [any], [function], [custom ffi lib]
App* app, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataEntryActionsHandle entry_actions_h)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataEntryActionsHandle)")
def _mdata_entry_actions_new_o_cb(user_data ,result ,entry_actions_h):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,entry_actions_h)
self.lib.safe_app.mdata_entry_actions_new(app, user_data, _mdata_entry_actions_new_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entry_actions_insert(self, app, actions_h, key, key_len, value, value_len, user_data, o_cb=None):
"""
App*, MDataEntryActionsHandle, uint8_t*, uintptr_t, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
App* app, MDataEntryActionsHandle actions_h, uint8_t* key, uintptr_t key_len, uint8_t* value, uintptr_t value_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entry_actions_insert_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entry_actions_insert(app, actions_h, key, key_len, value, value_len, user_data, _mdata_entry_actions_insert_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entry_actions_update(self, app, actions_h, key, key_len, value, value_len, entry_version, user_data, o_cb=None):
"""
App*, MDataEntryActionsHandle, uint8_t*, uintptr_t, uint8_t*, uintptr_t, uint64_t, [any], [function], [custom ffi lib]
App* app, MDataEntryActionsHandle actions_h, uint8_t* key, uintptr_t key_len, uint8_t* value, uintptr_t value_len, uint64_t entry_version, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entry_actions_update_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entry_actions_update(app, actions_h, key, key_len, value, value_len, entry_version, user_data, _mdata_entry_actions_update_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entry_actions_delete(self, app, actions_h, key, key_len, entry_version, user_data, o_cb=None):
"""
App*, MDataEntryActionsHandle, uint8_t*, uintptr_t, uint64_t, [any], [function], [custom ffi lib]
App* app, MDataEntryActionsHandle actions_h, uint8_t* key, uintptr_t key_len, uint64_t entry_version, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entry_actions_delete_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entry_actions_delete(app, actions_h, key, key_len, entry_version, user_data, _mdata_entry_actions_delete_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entry_actions_free(self, app, actions_h, user_data, o_cb=None):
"""
App*, MDataEntryActionsHandle, [any], [function], [custom ffi lib]
App* app, MDataEntryActionsHandle actions_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entry_actions_free_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entry_actions_free(app, actions_h, user_data, _mdata_entry_actions_free_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries_new(self, app, user_data, o_cb=None):
"""
App*, [any], [function], [custom ffi lib]
App* app, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataEntriesHandle entries_h)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataEntriesHandle)")
def _mdata_entries_new_o_cb(user_data ,result ,entries_h):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,entries_h)
self.lib.safe_app.mdata_entries_new(app, user_data, _mdata_entries_new_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries_insert(self, app, entries_h, key, key_len, value, value_len, user_data, o_cb=None):
"""
App*, MDataEntriesHandle, uint8_t*, uintptr_t, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
App* app, MDataEntriesHandle entries_h, uint8_t* key, uintptr_t key_len, uint8_t* value, uintptr_t value_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entries_insert_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entries_insert(app, entries_h, key, key_len, value, value_len, user_data, _mdata_entries_insert_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries_len(self, app, entries_h, user_data, o_cb=None):
"""
App*, MDataEntriesHandle, [any], [function], [custom ffi lib]
App* app, MDataEntriesHandle entries_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uintptr_t len)
"""
@ffi.callback("void(void* ,FfiResult* ,uintptr_t)")
def _mdata_entries_len_o_cb(user_data ,result ,len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,len)
self.lib.safe_app.mdata_entries_len(app, entries_h, user_data, _mdata_entries_len_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries_get(self, app, entries_h, key, key_len, user_data, o_cb=None):
"""
App*, MDataEntriesHandle, uint8_t*, uintptr_t, [any], [function], [custom ffi lib]
App* app, MDataEntriesHandle entries_h, uint8_t* key, uintptr_t key_len, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, uint8_t* content, uintptr_t content_len, uint64_t version)
"""
@ffi.callback("void(void* ,FfiResult* ,uint8_t* ,uintptr_t ,uint64_t)")
def _mdata_entries_get_o_cb(user_data ,result ,content ,content_len ,version):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,content ,content_len ,version)
self.lib.safe_app.mdata_entries_get(app, entries_h, key, key_len, user_data, _mdata_entries_get_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_list_entries(self, app, entries_h, user_data, o_cb=None):
"""
App*, MDataEntriesHandle, [any], [function], [custom ffi lib]
App* app, MDataEntriesHandle entries_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result, MDataEntry* entries, uintptr_t entries_len)
"""
@ffi.callback("void(void* ,FfiResult* ,MDataEntry* ,uintptr_t)")
def _mdata_list_entries_o_cb(user_data ,result ,entries ,entries_len):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result ,entries ,entries_len)
self.lib.safe_app.mdata_list_entries(app, entries_h, user_data, _mdata_list_entries_o_cb)
@safeUtils.safeThread(timeout=5,queue=appQueue)
def _mdata_entries_free(self, app, entries_h, user_data, o_cb=None):
"""
App*, MDataEntriesHandle, [any], [function], [custom ffi lib]
App* app, MDataEntriesHandle entries_h, void* user_data
> callback functions:
(*o_cb)(void* user_data, FfiResult* result)
"""
@ffi.callback("void(void* ,FfiResult*)")
def _mdata_entries_free_o_cb(user_data ,result):
self.safeUtils.checkResult(result)
appQueue.put('gotResult')
if o_cb:
o_cb(user_data ,result)
self.lib.safe_app.mdata_entries_free(app, entries_h, user_data, _mdata_entries_free_o_cb)
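# Hedged usage sketch (not from this file; `authlib`/`applib` are the loaded
# safe_authenticator/safe_app FFI libraries and 15001 is an arbitrary type tag):
#   md = mutableData(authlib, applib)
#   md._mdata_info_random_public(15001, ffi.NULL,
#       o_cb=lambda user_data, result, mdata_info: print('got MDataInfo'))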
bf6ea007f45a48810b0ca333d482abfb98d84ed7 | 172 | py | Python | calculadora/calculos.py | VitorSorriso/calculadora-python | 7af46b3f4b51ebc03e1217c604fe04878dc7f5c3 | [ "MIT" ] | 1 | 2020-05-14T20:57:09.000Z | 2020-05-14T20:57:09.000Z
def soma(n1, n2):  # addition
    return n1 + n2
def subtracao(n1, n2):  # subtraction
    return n1 - n2
def multiplicacao(n1, n2):  # multiplication
    return n1 * n2
def divisao(n1, n2):  # division; raises ZeroDivisionError when n2 == 0
    return n1 / n2
44c63aa0301b489cc7255d45f70ed395b9de7120 | 60 | py | Python | learning_python/ch24.py | ykyang/org.allnix.python | f9d74db2db026b20e925ac40dbca7d21b3ac0b0f | [ "Apache-2.0" ]
# Assumes the org.allnix.lp package is importable from this environment.
import org.allnix.lp.util
print(org.allnix.lp.util.read())
7827eed35f79f32d44be50575f42d15c59d7225e | 180 | py | Python | tests/test_004.py | chingc/euler-python | e963752969cfa7a939ef6a8408f5628ce3c96cae | [ "MIT" ]
"""https://projecteuler.net/problem=4"""
from euler.main import largest_palindrome
def test_004() -> None:
"""Expected: 906609"""
assert largest_palindrome(3) == 906609
785ad1d00b8f51a5113fa8aa38ba44612e07ebea | 21,421 | py | Python | sdk/python/pulumi_azure/appservice/public_certificate.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [ "ECL-2.0", "Apache-2.0" ] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PublicCertificateArgs', 'PublicCertificate']
@pulumi.input_type
class PublicCertificateArgs:
def __init__(__self__, *,
app_service_name: pulumi.Input[str],
blob: pulumi.Input[str],
certificate_location: pulumi.Input[str],
certificate_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str]):
"""
The set of arguments for constructing a PublicCertificate resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
pulumi.set(__self__, "app_service_name", app_service_name)
pulumi.set(__self__, "blob", blob)
pulumi.set(__self__, "certificate_location", certificate_location)
pulumi.set(__self__, "certificate_name", certificate_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Input[str]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def blob(self) -> pulumi.Input[str]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@blob.setter
def blob(self, value: pulumi.Input[str]):
pulumi.set(self, "blob", value)
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> pulumi.Input[str]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@certificate_location.setter
def certificate_location(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_location", value)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Input[str]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class _PublicCertificateState:
def __init__(__self__, *,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PublicCertificate resources.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""
if app_service_name is not None:
pulumi.set(__self__, "app_service_name", app_service_name)
if blob is not None:
pulumi.set(__self__, "blob", blob)
if certificate_location is not None:
pulumi.set(__self__, "certificate_location", certificate_location)
if certificate_name is not None:
pulumi.set(__self__, "certificate_name", certificate_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def blob(self) -> Optional[pulumi.Input[str]]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@blob.setter
def blob(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "blob", value)
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@certificate_location.setter
def certificate_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_location", value)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
The thumbprint of the public certificate.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
class PublicCertificate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path, "rb").read()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PublicCertificateArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path, "rb").read()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param PublicCertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PublicCertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PublicCertificateArgs.__new__(PublicCertificateArgs)
if app_service_name is None and not opts.urn:
raise TypeError("Missing required property 'app_service_name'")
__props__.__dict__["app_service_name"] = app_service_name
if blob is None and not opts.urn:
raise TypeError("Missing required property 'blob'")
__props__.__dict__["blob"] = blob
if certificate_location is None and not opts.urn:
raise TypeError("Missing required property 'certificate_location'")
__props__.__dict__["certificate_location"] = certificate_location
if certificate_name is None and not opts.urn:
raise TypeError("Missing required property 'certificate_name'")
__props__.__dict__["certificate_name"] = certificate_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = None
super(PublicCertificate, __self__).__init__(
'azure:appservice/publicCertificate:PublicCertificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None) -> 'PublicCertificate':
"""
Get an existing PublicCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PublicCertificateState.__new__(_PublicCertificateState)
__props__.__dict__["app_service_name"] = app_service_name
__props__.__dict__["blob"] = blob
__props__.__dict__["certificate_location"] = certificate_location
__props__.__dict__["certificate_name"] = certificate_name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = thumbprint
return PublicCertificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Output[str]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@property
@pulumi.getter
def blob(self) -> pulumi.Output[str]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> pulumi.Output[str]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Output[str]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[str]:
"""
The thumbprint of the public certificate.
"""
return pulumi.get(self, "thumbprint")
avg_line_length: 48.794989 | max_line_length: 233 | alphanum_fraction: 0.678633 | size_file_byte: 21,421 | num_lines: 438
hexsha: 788709d330b05666610bcd073962880fd5a9d689 | size: 95,437 | ext: py | lang: Python
max_stars_repo_path: Spine_compartment.py | max_stars_repo_name: rribeiro-sci/CA3-CA1_SynapticModel | max_stars_repo_head_hexsha: cd3e3e33c5ac816063d1186f90de043d2dac39d0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-05-29T17:23:53.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-29T17:23:53.000Z
max_issues_repo_path: Spine_compartment.py | max_issues_repo_name: rribeiro-sci/CA3-CA1_SynapticModel | max_issues_repo_head_hexsha: cd3e3e33c5ac816063d1186f90de043d2dac39d0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Spine_compartment.py | max_forks_repo_name: rribeiro-sci/CA3-CA1_SynapticModel | max_forks_repo_head_hexsha: cd3e3e33c5ac816063d1186f90de043d2dac39d0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-01-18T11:36:06.000Z | max_forks_repo_forks_event_max_datetime: 2021-01-18T11:36:06.000Z
from sympy import *
from pysb import *
from pysb.macros import *
from pysb.integrate import Solver
from pysb.simulator import ScipyOdeSimulator
import numpy as np
import math
CaMKII_states = [
#CaMKII-CaM complexes
'APO','CaMKII_CaM0','CaMKII_CaM1C','CaMKII_CaM2C','CaMKII_CaM1N','CaMKII_CaM2N',\
'CaMKII_CaM1C1N','CaMKII_CaM1C2N','CaMKII_CaM2C1N','CaMKII_CaM4',\
#CaMKII-CaMKII complexes
'CaMKII_CaM0_CaMKII_CaM0','CaMKII_CaM0_CaMKII_CaM1C','CaMKII_CaM0_CaMKII_CaM2C',\
'CaMKII_CaM0_CaMKII_CaM1N','CaMKII_CaM0_CaMKII_CaM2N','CaMKII_CaM0_CaMKII_CaM1C1N',\
'CaMKII_CaM0_CaMKII_CaM2C1N','CaMKII_CaM0_CaMKII_CaM1C2N','CaMKII_CaM0_CaMKII_CaM4',\
'CaMKII_CaM1C_CaMKII_CaM0','CaMKII_CaM1C_CaMKII_CaM1C','CaMKII_CaM1C_CaMKII_CaM2C',\
'CaMKII_CaM1C_CaMKII_CaM1N','CaMKII_CaM1C_CaMKII_CaM2N','CaMKII_CaM1C_CaMKII_CaM1C1N',\
'CaMKII_CaM1C_CaMKII_CaM2C1N','CaMKII_CaM1C_CaMKII_CaM1C2N','CaMKII_CaM1C_CaMKII_CaM4',\
'CaMKII_CaM2C_CaMKII_CaM0','CaMKII_CaM2C_CaMKII_CaM1C','CaMKII_CaM2C_CaMKII_CaM2C',\
'CaMKII_CaM2C_CaMKII_CaM1N','CaMKII_CaM2C_CaMKII_CaM2N','CaMKII_CaM2C_CaMKII_CaM1C1N',\
'CaMKII_CaM2C_CaMKII_CaM2C1N','CaMKII_CaM2C_CaMKII_CaM1C2N','CaMKII_CaM2C_CaMKII_CaM4',\
'CaMKII_CaM1N_CaMKII_CaM0','CaMKII_CaM1N_CaMKII_CaM1C','CaMKII_CaM1N_CaMKII_CaM2C',\
'CaMKII_CaM1N_CaMKII_CaM1N','CaMKII_CaM1N_CaMKII_CaM2N','CaMKII_CaM1N_CaMKII_CaM1C1N',\
'CaMKII_CaM1N_CaMKII_CaM2C1N','CaMKII_CaM1N_CaMKII_CaM1C2N','CaMKII_CaM1N_CaMKII_CaM4',\
'CaMKII_CaM2N_CaMKII_CaM0','CaMKII_CaM2N_CaMKII_CaM1C','CaMKII_CaM2N_CaMKII_CaM2C',\
'CaMKII_CaM2N_CaMKII_CaM1N','CaMKII_CaM2N_CaMKII_CaM2N','CaMKII_CaM2N_CaMKII_CaM1C1N',\
'CaMKII_CaM2N_CaMKII_CaM2C1N','CaMKII_CaM2N_CaMKII_CaM1C2N','CaMKII_CaM2N_CaMKII_CaM4',\
'CaMKII_CaM1C1N_CaMKII_CaM0','CaMKII_CaM1C1N_CaMKII_CaM1C','CaMKII_CaM1C1N_CaMKII_CaM2C',\
'CaMKII_CaM1C1N_CaMKII_CaM1N','CaMKII_CaM1C1N_CaMKII_CaM2N','CaMKII_CaM1C1N_CaMKII_CaM1C1N',\
'CaMKII_CaM1C1N_CaMKII_CaM2C1N','CaMKII_CaM1C1N_CaMKII_CaM1C2N','CaMKII_CaM1C1N_CaMKII_CaM4',\
'CaMKII_CaM2C1N_CaMKII_CaM0','CaMKII_CaM2C1N_CaMKII_CaM1C','CaMKII_CaM2C1N_CaMKII_CaM2C',\
'CaMKII_CaM2C1N_CaMKII_CaM1N','CaMKII_CaM2C1N_CaMKII_CaM2N','CaMKII_CaM2C1N_CaMKII_CaM1C1N',\
'CaMKII_CaM2C1N_CaMKII_CaM2C1N','CaMKII_CaM2C1N_CaMKII_CaM1C2N','CaMKII_CaM2C1N_CaMKII_CaM4',\
'CaMKII_CaM1C2N_CaMKII_CaM0','CaMKII_CaM1C2N_CaMKII_CaM1C','CaMKII_CaM1C2N_CaMKII_CaM2C',\
'CaMKII_CaM1C2N_CaMKII_CaM1N','CaMKII_CaM1C2N_CaMKII_CaM2N','CaMKII_CaM1C2N_CaMKII_CaM1C1N',\
'CaMKII_CaM1C2N_CaMKII_CaM2C1N','CaMKII_CaM1C2N_CaMKII_CaM1C2N','CaMKII_CaM1C2N_CaMKII_CaM4',\
'CaMKII_CaM4_CaMKII_CaM0','CaMKII_CaM4_CaMKII_CaM1C','CaMKII_CaM4_CaMKII_CaM2C',\
'CaMKII_CaM4_CaMKII_CaM1N','CaMKII_CaM4_CaMKII_CaM2N','CaMKII_CaM4_CaMKII_CaM1C1N',\
'CaMKII_CaM4_CaMKII_CaM2C1N','CaMKII_CaM4_CaMKII_CaM1C2N','CaMKII_CaM4_CaMKII_CaM4',\
]
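# A minimal consistency sketch (illustrative addition, not part of the
# original model): the 81 dimer names above are exactly the ordered pairs
# over the 9 CaM-loading states, so the block can be cross-checked in code.
_cam_loading_states = ['CaM0', 'CaM1C', 'CaM2C', 'CaM1N', 'CaM2N',
                       'CaM1C1N', 'CaM1C2N', 'CaM2C1N', 'CaM4']
_expected_dimers = {'CaMKII_%s_CaMKII_%s' % (a, b)
                    for a in _cam_loading_states for b in _cam_loading_states}
# APO plus the 9 CaM complexes occupy indices 0-9; the dimers follow.
assert _expected_dimers == set(CaMKII_states[10:]), 'dimer state block mismatch'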
def network(init_cond):
Model()
#MONOMERS
Monomer('CaM', ['CaM_b1','CaM_s'], {'CaM_s':['CaM0','CaM1C','CaM2C','CaM1N','CaM2N','CaM1C1N','CaM1C2N','CaM2C1N','CaM4']})
# N.B.: 'CaMKII' is a single monomeric subunit of the whole dodecameric CaMKII structure.
Monomer('CaMKII', ['CaMKII_b1','CaMKII_s','CaMKII_p'],
{'CaMKII_s': CaMKII_states,'CaMKII_p':['p0','p1']})
Monomer('Ca')
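# Ca2+ is declared without binding sites: free calcium is a well-mixed pool,
# and Ca occupancy is encoded in the CaM/CaMKII state labels rather than as
# explicit bonds.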
#INITIAL CONDITIONS
Initial(CaM(CaM_b1=None, CaM_s='CaM0'), Parameter('CaM_init', init_cond['CaM_init'])) # uM
Initial(CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0'), Parameter('CaMKII_init', init_cond['CaMKII_init'])) # uM
Initial(Ca(), Parameter('Ca_init', init_cond['Ca_init'])) # uM
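# Illustrative call (the concentrations below are placeholder assumptions,
# not values from the original study):
#   network({'CaM_init': 30.0, 'CaMKII_init': 80.0, 'Ca_init': 0.1})  # uM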
#PARAMETERS
# counter
# Parameter('counter_speed', 1)
# Observable('time', counter())
#Ca binding CaM
Parameter('CaM_1C_on', 4.000) # 1/(uM*s)
Parameter('CaM_1C_off', 40.000) # 1/s
Parameter('CaM_2C_on', 10.000) # 1/(uM*s)
Parameter('CaM_2C_off', 9.250) # 1/s
Parameter('CaM_1N_on', 100.000) # 1/(uM*s)
Parameter('CaM_1N_off', 2500.000) # 1/s
Parameter('CaM_2N_on', 150.000) # 1/(uM*s)
Parameter('CaM_2N_off', 750.000) # 1/s
#CaMKII dimerization
Parameter('CaMKII2_on', 50) # 1/(uM*s)
Parameter('CaMKII2_off', 60) # 1/s
Parameter('CaMKII_pCaMKII_on', 50) # 1/(uM*s)
Parameter('CaMKII_pCaMKII_off', 60) # 1/s
#CaM binding CaMKII
Parameter('CaMKII_CaM0_on', 0.0038) # 1/(uM*s)
Parameter('CaMKII_CaM0_off', 5.5) # 1/s
Parameter('CaMKII_CaM1C1N_on', 3.3) # 1/(uM*s)
Parameter('CaMKII_CaM1C1N_off', 3.4) # 1/s
Parameter('CaMKII_CaM1C2N_on', 1.9) # 1/(uM*s)
Parameter('CaMKII_CaM1C2N_off', 1.9) # 1/s
Parameter('CaMKII_CaM1C_on', 0.059) # 1/(uM*s)
Parameter('CaMKII_CaM1C_off', 6.8) # 1/s
Parameter('CaMKII_CaM1N_on', 0.022) # 1/(uM*s)
Parameter('CaMKII_CaM1N_off', 3.1) # 1/s
Parameter('CaMKII_CaM2C1N_on', 5.2) # 1/(uM*s)
Parameter('CaMKII_CaM2C1N_off', 3.8) # 1/s
Parameter('CaMKII_CaM2C_on', 0.92) # 1/(uM*s)
Parameter('CaMKII_CaM2C_off', 6.8) # 1/s
Parameter('CaMKII_CaM2N_on', 0.1) # 1/(uM*s)
Parameter('CaMKII_CaM2N_off', 1.7) # 1/s
Parameter('CaMKII_CaM4_on', 30) # 1/(uM*s)
Parameter('CaMKII_CaM4_off', 1.7) # 1/s
#Ca binding CaM-CaMKII complex
Parameter('CaMKII_CaM_1C_on', 44) # 1/(uM*s)
Parameter('CaMKII_CaM_1C_off', 33) # 1/s
Parameter('CaMKII_CaM_1N_on', 75) # 1/(uM*s)
Parameter('CaMKII_CaM_1N_off', 300) # 1/s
Parameter('CaMKII_CaM_2C_on', 44) # 1/(uM*s)
Parameter('CaMKII_CaM_2C_off', 2.7) # 1/s
Parameter('CaMKII_CaM_2N_on', 76) # 1/(uM*s)
Parameter('CaMKII_CaM_2N_off', 33) # 1/s
#CaMKII autophosphorylation
Parameter('pCaMKII_CaM0', 0) # 1/s
Parameter('pCaMKII_CaM1C', 0.032) # 1/s
Parameter('pCaMKII_CaM1C1N', 0.094) # 1/s
Parameter('pCaMKII_CaM1C2N', 0.154) # 1/s
Parameter('pCaMKII_CaM1N', 0.060) # 1/s
Parameter('pCaMKII_CaM2C', 0.064) # 1/s
Parameter('pCaMKII_CaM2C1N', 0.124) # 1/s
Parameter('pCaMKII_CaM2N', 0.120) # 1/s
Parameter('pCaMKII_CaM4', 0.960) # 1/s
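# Note the trend above: the per-subunit autophosphorylation rate increases
# with the Ca2+ loading of the bound CaM, from 0 1/s for apo-CaM up to
# 0.960 1/s for fully loaded CaM4.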
# CaMKII + PPI
Parameter('pCaMKII_PPI_on', 3.0) # 1/(uM*s)
Parameter('pCaMKII_PPI_off', 0.5) # 1/s
Parameter('pCaMKII_dephosph', 2.0) # 1/s
#RULES
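# Syntax reminder: in PySB, '|' declares a reversible rule (forward and
# reverse rate parameters follow), while '>>' declares an irreversible one.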
# let Counter flow
# Rule('counter_increment', None >> counter(), counter_speed)
###### N.B.: the Ca inflow/outflow rules below are commented out and work only for Simulate_alfa
# Ca inflow
# Parameter('Ca_inflow_k', 0)
# Rule('Ca_inflow', None >> Ca(), Ca_inflow_k)
# # Ca outflow
# Parameter('Ca_outflow_k', 1/0.02)
# Rule('Ca_outflow', Ca() >> None, Ca_outflow_k)
######
# Ca binding CaM (reactions 1-24)
Rule('CaM0_Ca_C', CaM(CaM_b1=None, CaM_s='CaM0') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1C') , CaM_1C_on, CaM_1C_off)
Rule('CaM1C_Ca_C', CaM(CaM_b1=None, CaM_s='CaM1C') + Ca() | CaM(CaM_b1=None, CaM_s='CaM2C') , CaM_2C_on, CaM_2C_off)
Rule('CaM0_Ca_N', CaM(CaM_b1=None, CaM_s='CaM0') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1N') , CaM_1N_on, CaM_1N_off)
Rule('CaM1N_Ca_N', CaM(CaM_b1=None, CaM_s='CaM1N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM2N') , CaM_2N_on, CaM_2N_off)
Rule('CaM1C_Ca_N', CaM(CaM_b1=None, CaM_s='CaM1C') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1C1N') , CaM_1N_on, CaM_1N_off)
Rule('CaM1C1N_Ca_N', CaM(CaM_b1=None, CaM_s='CaM1C1N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1C2N') , CaM_2N_on, CaM_2N_off)
Rule('CaM2C_Ca_N', CaM(CaM_b1=None, CaM_s='CaM2C') + Ca() | CaM(CaM_b1=None, CaM_s='CaM2C1N') , CaM_1N_on, CaM_1N_off)
Rule('CaM2C1N_Ca_N', CaM(CaM_b1=None, CaM_s='CaM2C1N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM4') , CaM_2N_on, CaM_2N_off)
Rule('CaM1N_Ca_C', CaM(CaM_b1=None, CaM_s='CaM1N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1C1N') , CaM_1C_on, CaM_1C_off)
Rule('CaM1C1N_Ca_C', CaM(CaM_b1=None, CaM_s='CaM1C1N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM2C1N') , CaM_2C_on, CaM_2C_off)
Rule('CaM2N_Ca_C', CaM(CaM_b1=None, CaM_s='CaM2N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM1C2N') , CaM_1C_on, CaM_1C_off)
Rule('CaM1C2N_Ca_C', CaM(CaM_b1=None, CaM_s='CaM1C2N') + Ca() | CaM(CaM_b1=None, CaM_s='CaM4') , CaM_2C_on, CaM_2C_off)
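# Note: each lobe binds Ca2+ with the same rate constants regardless of the
# other lobe's occupancy, i.e. the C- and N-lobes are treated as independent
# in this binding scheme.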
#CaM binding CaMKII (reactions 49-66)
Rule('CaM0_CaMKII', CaM(CaM_b1=None, CaM_s='CaM0') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0'), CaMKII_CaM0_on, CaMKII_CaM0_off)
Rule('CaM1C_CaMKII', CaM(CaM_b1=None, CaM_s='CaM1C') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0'), CaMKII_CaM1C_on, CaMKII_CaM1C_off)
Rule('CaM2C_CaMKII', CaM(CaM_b1=None, CaM_s='CaM2C') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), CaMKII_CaM2C_on, CaMKII_CaM2C_off)
Rule('CaM1N_CaMKII', CaM(CaM_b1=None, CaM_s='CaM1N') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0'), CaMKII_CaM1N_on, CaMKII_CaM1N_off)
Rule('CaM2N_CaMKII', CaM(CaM_b1=None, CaM_s='CaM2N') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0'), CaMKII_CaM2N_on, CaMKII_CaM2N_off)
Rule('CaM1C1N_CaMKII', CaM(CaM_b1=None, CaM_s='CaM1C1N') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII_CaM1C1N_on, CaMKII_CaM1C1N_off)
Rule('CaM1C2N_CaMKII', CaM(CaM_b1=None, CaM_s='CaM1C2N') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII_CaM1C2N_on, CaMKII_CaM1C2N_off)
Rule('CaM2C1N_CaMKII', CaM(CaM_b1=None, CaM_s='CaM2C1N') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII_CaM2C1N_on, CaMKII_CaM2C1N_off)
Rule('CaM4_CaMKII', CaM(CaM_b1=None, CaM_s='CaM4') + CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), CaMKII_CaM4_on, CaMKII_CaM4_off)
#Ca binding CaM-CaMKII dimers (reactions 25-48)
Rule('CaMKII_CaM0_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') , CaMKII_CaM_1C_on, CaMKII_CaM_1C_off)
Rule('CaMKII_CaM1C_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') , CaMKII_CaM_2C_on, CaMKII_CaM_2C_off)
Rule('CaMKII_CaM0_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') , CaMKII_CaM_1N_on, CaMKII_CaM_1N_off)
Rule('CaMKII_CaM1N_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') , CaMKII_CaM_2N_on, CaMKII_CaM_2N_off)
Rule('CaMKII_CaM1C_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') , CaMKII_CaM_1N_on, CaMKII_CaM_1N_off)
Rule('CaMKII_CaM1N_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') , CaMKII_CaM_1C_on, CaMKII_CaM_1C_off)
Rule('CaMKII_CaM2C_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') , CaMKII_CaM_1N_on, CaMKII_CaM_1N_off)
Rule('CaMKII_CaM2N_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') , CaMKII_CaM_1C_on, CaMKII_CaM_1C_off)
Rule('CaMKII_CaM1C1N_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') , CaMKII_CaM_2C_on, CaMKII_CaM_2C_off)
Rule('CaMKII_CaM1C1N_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') , CaMKII_CaM_2N_on, CaMKII_CaM_2N_off)
Rule('CaMKII_CaM2C1N_Ca_N',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') , CaMKII_CaM_2N_on, CaMKII_CaM_2N_off)
Rule('CaMKII_CaM1C2N_Ca_C',CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + Ca() | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') , CaMKII_CaM_2C_on, CaMKII_CaM_2C_off)
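# Note: in this parameter set Ca2+ dissociates more slowly from CaMKII-bound
# CaM than from free CaM (e.g. CaM_1N_off = 2500 1/s vs CaMKII_CaM_1N_off =
# 300 1/s), consistent with trapping of Ca2+/CaM by the kinase.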
#CaM-CaMKII dimers + CaM-CaMKII dimers complexation (reactions 67-156)
Rule('CaMKII_CaM0_CaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM0', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM0_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1N_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2N_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2N_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C1N_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C1N_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C1N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C2N_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C2N', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
Rule('CaMKII_CaM4_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM4', CaMKII_p='p0'), CaMKII2_on, CaMKII2_off)
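# Only the 45 unordered pairings are declared above: A + B and B + A are the
# same reaction, so e.g. a CaMKII_CaM1C subunit meeting a CaMKII_CaM0 subunit
# is covered by the 'CaMKII_CaM0_CaMKII_CaM1C' rule. The mirrored names in
# CaMKII_states are not produced by this block.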
#CaM-CaMKII%CaM-CaMKII complexes autophosphorylation (reactions 157-237)
Rule('CaMKII_CaM0_CaMKII_CaM1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1'), pCaMKII_CaM1N)
Rule('CaMKII_CaM0_CaMKII_CaM2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1'), pCaMKII_CaM2N)
Rule('CaMKII_CaM0_CaMKII_CaM1C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM1C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1'), pCaMKII_CaM1C)
Rule('CaMKII_CaM0_CaMKII_CaM1C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM1C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM0_CaMKII_CaM1C2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM0_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM0_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM0_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM0)
Rule('CaMKII_CaM0_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM1N_CaMKII_CaM2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1'), pCaMKII_CaM2N)
Rule('CaMKII_CaM1N_CaMKII_CaM1C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM1C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1N_CaMKII_CaM1C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM1C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1N_CaMKII_CaM1C2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1N_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM1N_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM1N_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM1N)
Rule('CaMKII_CaM1N_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM2N_CaMKII_CaM1C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM1C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1'), pCaMKII_CaM1C)
Rule('CaMKII_CaM2N_CaMKII_CaM1C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM1C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM2N_CaMKII_CaM1C2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2N_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM2N_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM2N_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM2N)
Rule('CaMKII_CaM2N_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM1C_CaMKII_CaM1C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C_CaMKII_CaM1C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C_CaMKII_CaM1C2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1C_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM1C_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM1C_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM1C1N_CaMKII_CaM1C2N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM1C1N_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C1N_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM1C2N_CaMKII_CaM2C_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1C2N_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM1C2N_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1C2N_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM1C2N_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM1C2N_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM2C_CaMKII_CaM2C1N_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'), pCaMKII_CaM2C)
Rule('CaMKII_CaM2C_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM2C_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM2C)
Rule('CaMKII_CaM2C_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM2C1N_CaMKII_CaM4_p1', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM2C1N_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
Rule('CaMKII_CaM0_CaMKII_CaM0_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM0', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1'), pCaMKII_CaM0)
Rule('CaMKII_CaM1N_CaMKII_CaM1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1'), pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_CaMKII_CaM2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1'), pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_CaMKII_CaM1C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1'), pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_CaMKII_CaM1C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'), pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_CaMKII_CaM1C2N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C2N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'), pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_CaMKII_CaM2C_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'), pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_CaMKII_CaM2C1N_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C1N', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'), pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_CaMKII_CaM4_p2', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM4', CaMKII_p='p0') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'), pCaMKII_CaM4)
#CaM-CaMKII dimers + pCaM-CaMKII phosphorylated dimers complexation (reactions 238-399)
Rule('CaMKII_CaM0_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM0', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM0', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C2N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2C', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2C1N', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM0_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1N_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2N_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
Rule('CaMKII_CaM4_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') | CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM4', CaMKII_p='p1'), CaMKII_pCaMKII_on, CaMKII_pCaMKII_off)
#pCaM-CaMKII%CaM-CaMKII complexes autophosphorylation (reactions 400-480)
Rule('CaMKII_CaM0_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM0_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM0', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM1C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM1C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM1C2N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM1C2N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM2C_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2C', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM2C1N_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM2C1N', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') ,pCaMKII_CaM4)
Rule('CaMKII_CaM0_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM0', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM0)
Rule('CaMKII_CaM1N_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM1N)
Rule('CaMKII_CaM2N_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM2N)
Rule('CaMKII_CaM1C_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM1C)
Rule('CaMKII_CaM1C1N_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM1C1N)
Rule('CaMKII_CaM1C2N_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM1C2N)
Rule('CaMKII_CaM2C_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM2C)
Rule('CaMKII_CaM2C1N_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM2C1N)
Rule('CaMKII_CaM4_pCaMKII_CaM4_autophospho', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4_CaMKII_CaM4', CaMKII_p='p1') >> CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') + CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1') ,pCaMKII_CaM4)
#OBSERVABLES
Observable('obs_CaM0', CaM(CaM_b1=None, CaM_s='CaM0'))
Observable('obs_CaM1C', CaM(CaM_b1=None, CaM_s='CaM1C'))
Observable('obs_CaM1N', CaM(CaM_b1=None, CaM_s='CaM1N'))
Observable('obs_CaM2C', CaM(CaM_b1=None, CaM_s='CaM2C'))
Observable('obs_CaM2N', CaM(CaM_b1=None, CaM_s='CaM2N'))
Observable('obs_CaM1C1N', CaM(CaM_b1=None, CaM_s='CaM1C1N'))
Observable('obs_CaM1C2N', CaM(CaM_b1=None, CaM_s='CaM1C2N'))
Observable('obs_CaM2C1N', CaM(CaM_b1=None, CaM_s='CaM2C1N'))
Observable('obs_CaM4', CaM(CaM_b1=None, CaM_s='CaM4'))
Observable('obs_CaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p0'))
Observable('obs_CaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p0'))
Observable('obs_pCaMKII_CaM1C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM2C', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1N', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2N', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM1C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C1N', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM2C1N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM2C1N', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM1C2N', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM1C2N', CaMKII_p='p1'))
Observable('obs_pCaMKII_CaM4', CaMKII(CaMKII_b1=None, CaMKII_s='CaMKII_CaM4', CaMKII_p='p1'))
Observable('obs_CaMKII_APO', CaMKII(CaMKII_b1=None, CaMKII_s='APO', CaMKII_p='p0'))
Observable('obs_Ca', Ca())
return model
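# ---------------------------------------------------------------------------
# Minimal simulation sketch (not part of the model file above): the builder
# ends with `return model`, so the assembled PySB model can be run with
# pysb.simulator. The function name `build_model` and the time span are
# illustrative assumptions, not taken from the source.
# ---------------------------------------------------------------------------
import numpy as np
from pysb.simulator import ScipyOdeSimulator

model = build_model()  # hypothetical name for the builder defined above

tspan = np.linspace(0, 10, 101)  # illustrative time span
sim = ScipyOdeSimulator(model, tspan=tspan)
result = sim.run()

# Observables declared above are available by name on the result object.
print(result.observables['obs_CaM4'][-1])        # fully Ca-loaded CaM at t_end
print(result.observables['obs_pCaMKII_CaM4'][-1])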
# === test/test_XMLtoJSON.py (unification-com/haiku-node-prototype, MIT) ===
import json
import pytest
import xmljson
from lxml.etree import fromstring
schema_1 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>Heartrate</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>GeoLocation</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>TimeStamp</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>Pulse</name><type>int</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
schema_2 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>DataBlob</name><type>binarydata</type><is-null>true</is-null><table>data_1</table></field><field><name>BlobSize</name><type>int</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
schema_3 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>Image</name><type>base64_mime_image</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
@pytest.mark.parametrize("xml_str", [schema_1, schema_2, schema_3])
def test_xml_to_json(xml_str):
    xml = fromstring(xml_str)
    json_str = json.dumps(xmljson.gdata.data(xml))
    d = json.loads(json_str)
    # Smoke test: a conversion failure surfaces as an exception above; the
    # prints are a manual inspection aid when running under `pytest -s`.
    print(json_str)
    print(d)
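# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): the shape the GData
# convention produces. Element text lands under the "$t" key, and simple
# values are coerced to Python types by default, so "7" below should come
# back as the integer 7. The tiny XML document is a made-up example.
# ---------------------------------------------------------------------------
import json
import xmljson
from lxml.etree import fromstring

doc = fromstring("<field><name>Pulse</name><width>7</width></field>")
print(json.dumps(xmljson.gdata.data(doc)))
# expected (approximately): {"field": {"name": {"$t": "Pulse"}, "width": {"$t": 7}}}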
# === Algorithm.Python/stubs/QuantConnect/Indicators/__CandlestickPatterns_1.py (gaoxiaojun/Lean, Apache-2.0) ===
from .__CandlestickPatterns_2 import *
import typing
import QuantConnect.Indicators.CandlestickPatterns
import datetime
class DojiStar(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Doji Star candlestick pattern indicator

    DojiStar(name: str)
    DojiStar()
    """
    def Reset(self) -> None:
        pass

    @typing.overload
    def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.DojiStar:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.DojiStar:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.DojiStar:
        pass

    IsReady: bool
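# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the stub file): one way a pattern
# indicator like DojiStar might be wired into a Lean Python algorithm.
# The symbol, dates and resolution are hypothetical choices; RegisterIndicator,
# IsReady and Current.Value are standard QCAlgorithm/indicator members.
# ---------------------------------------------------------------------------
from AlgorithmImports import *

class DojiStarDemo(QCAlgorithm):
    def Initialize(self):
        self.SetStartDate(2020, 1, 1)
        self.spy = self.AddEquity("SPY", Resolution.Daily).Symbol
        # Instantiate the pattern (stubbed above) and let Lean feed it bars.
        self.doji = DojiStar("SPY_doji")
        self.RegisterIndicator(self.spy, self.doji, Resolution.Daily)

    def OnData(self, data):
        # Candlestick patterns emit signed signals through Current.Value.
        if self.doji.IsReady and self.doji.Current.Value != 0:
            self.Debug(f"DojiStar signal: {self.doji.Current.Value}")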
class DragonflyDoji(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Dragonfly Doji candlestick pattern indicator

    DragonflyDoji(name: str)
    DragonflyDoji()
    """
    def Reset(self) -> None:
        pass

    @typing.overload
    def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.DragonflyDoji:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.DragonflyDoji:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.DragonflyDoji:
        pass

    IsReady: bool

class Engulfing(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Engulfing candlestick pattern

    Engulfing(name: str)
    Engulfing()
    """
    @typing.overload
    def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.Engulfing:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.Engulfing:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.Engulfing:
        pass

    IsReady: bool

class EveningDojiStar(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Evening Doji Star candlestick pattern

    EveningDojiStar(name: str, penetration: Decimal)
    EveningDojiStar(penetration: Decimal)
    EveningDojiStar()
    """
    def Reset(self) -> None:
        pass

    @typing.overload
    def __init__(self, name: str, penetration: float) -> QuantConnect.Indicators.CandlestickPatterns.EveningDojiStar:
        pass

    @typing.overload
    def __init__(self, penetration: float) -> QuantConnect.Indicators.CandlestickPatterns.EveningDojiStar:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.EveningDojiStar:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.EveningDojiStar:
        pass

    IsReady: bool

class EveningStar(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Evening Star candlestick pattern

    EveningStar(name: str, penetration: Decimal)
    EveningStar(penetration: Decimal)
    EveningStar()
    """
    def Reset(self) -> None:
        pass

    @typing.overload
    def __init__(self, name: str, penetration: float) -> QuantConnect.Indicators.CandlestickPatterns.EveningStar:
        pass

    @typing.overload
    def __init__(self, penetration: float) -> QuantConnect.Indicators.CandlestickPatterns.EveningStar:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.EveningStar:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.EveningStar:
        pass

    IsReady: bool

class GapSideBySideWhite(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Up/Down-gap side-by-side white lines candlestick pattern

    GapSideBySideWhite(name: str)
    GapSideBySideWhite()
    """
    def Reset(self) -> None:
        pass

    @typing.overload
    def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.GapSideBySideWhite:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.GapSideBySideWhite:
        pass

    def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.GapSideBySideWhite:
        pass

    IsReady: bool

class GravestoneDoji(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
    """
    Gravestone Doji candlestick pattern indicator

    GravestoneDoji(name: str)
    GravestoneDoji()
    """
    def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.GravestoneDoji:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.GravestoneDoji:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.GravestoneDoji:
pass
IsReady: bool
class Hammer(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
Hammer candlestick pattern indicator
Hammer(name: str)
Hammer()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.Hammer:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.Hammer:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.Hammer:
pass
IsReady: bool
class HangingMan(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
Hanging Man candlestick pattern indicator
HangingMan(name: str)
HangingMan()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.HangingMan:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.HangingMan:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.HangingMan:
pass
IsReady: bool
class Harami(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
Harami candlestick pattern indicator
Harami(name: str)
Harami()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.Harami:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.Harami:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.Harami:
pass
IsReady: bool
class HaramiCross(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
Harami Cross candlestick pattern indicator
HaramiCross(name: str)
HaramiCross()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.HaramiCross:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.HaramiCross:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.HaramiCross:
pass
IsReady: bool
class HighWaveCandle(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
High-Wave Candle candlestick pattern indicator
HighWaveCandle(name: str)
HighWaveCandle()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.HighWaveCandle:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.HighWaveCandle:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.HighWaveCandle:
pass
IsReady: bool
class Hikkake(QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern, System.IComparable, QuantConnect.Indicators.IIndicator[IBaseDataBar], QuantConnect.Indicators.IIndicator, System.IComparable[IIndicator[IBaseDataBar]]):
"""
Hikkake candlestick pattern
Hikkake(name: str)
Hikkake()
"""
def Reset(self) -> None:
pass
@typing.overload
def __init__(self, name: str) -> QuantConnect.Indicators.CandlestickPatterns.Hikkake:
pass
@typing.overload
def __init__(self) -> QuantConnect.Indicators.CandlestickPatterns.Hikkake:
pass
def __init__(self, *args) -> QuantConnect.Indicators.CandlestickPatterns.Hikkake:
pass
IsReady: bool
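The classes above are auto-generated typing stubs for LEAN's C# candlestick indicators. A hedged sketch of direct use follows, assuming a LEAN/Python.NET environment where the backing types are loadable; it relies only on the constructor overloads and members shown in the stubs.

from QuantConnect.Indicators.CandlestickPatterns import DojiStar, Hammer

doji_star = DojiStar("DOJISTAR")  # named-constructor overload from the stub
hammer = Hammer()                 # parameterless overload

print(doji_star.IsReady)  # False until enough IBaseDataBar updates arrive
doji_star.Reset()         # returns the indicator to its un-warmed state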
| 30.854599
| 241
| 0.733026
| 902
| 10,398
| 8.264967
| 0.069845
| 0.239034
| 0.302482
| 0.078873
| 0.817706
| 0.757076
| 0.757076
| 0.757076
| 0.652448
| 0.538162
| 0
| 0.000116
| 0.174168
| 10,398
| 336
| 242
| 30.946429
| 0.868056
| 0.108675
| 0
| 0.646341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.323171
| false
| 0.323171
| 0.02439
| 0
| 0.506098
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
15bd2b62fae7c144a8e3e8a5aa38e721c46ef272
| 13,169
|
py
|
Python
|
rotkehlchen/tests/exchanges/test_independentreserve.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/tests/exchanges/test_independentreserve.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/tests/exchanges/test_independentreserve.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import warnings as test_warnings
from unittest.mock import patch
import pytest
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.constants.assets import A_AUD, A_ETC, A_ETH
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.exchanges.data_structures import Location, Trade, TradeType
from rotkehlchen.exchanges.independentreserve import (
IR_TO_WORLD,
Independentreserve,
independentreserve_asset,
)
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.mock import MockResponse
def test_location():
exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
assert exchange.location == Location.INDEPENDENTRESERVE
assert exchange.name == 'independentreserve1'
def test_assets_are_known():
exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
response = exchange._api_query('get', 'Public', 'GetValidPrimaryCurrencyCodes')
for currency in response:
try:
independentreserve_asset(currency)
except UnknownAsset:
test_warnings.warn(UserWarning(
f'Found unknown primary asset {currency} in IndependentReserve. '
f'Support for it has to be added',
))
response = exchange._api_query('get', 'Public', 'GetValidSecondaryCurrencyCodes')
for currency in response:
try:
independentreserve_asset(currency)
except UnknownAsset:
test_warnings.warn(UserWarning(
f'Found unknown secondary asset {currency} in IndependentReserve. '
f'Support for it has to be added',
))
@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_balances(
function_scope_independentreserve,
inquirer, # pylint: disable=unused-argument
):
"""Test all balances returned by IndependentReserve are proccessed properly"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """[{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Aud", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Nzd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Sgd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xbt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Eth", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xrp", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Ada", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Dot", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Uni", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Link", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usdt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usdc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Bch", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Ltc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Mkr", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Dai", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Comp", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Snx", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Grt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Eos", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xlm", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Etc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Bat", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Pmgt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Yfi", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Aave", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Zrx", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Omg", "TotalBalance": 150.55}]""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
balances, msg = exchange.query_balances()
assert msg == ''
assets_seen = {0}
for asset, balance in balances.items():
assert asset in IR_TO_WORLD.values()
assert asset not in assets_seen
assets_seen.add(asset)
assert balance.amount == FVal('150.55')
@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_some_balances(
function_scope_independentreserve,
inquirer, # pylint: disable=unused-argument
):
"""Just like test_query_balances but make sure 0 balances are skipped"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """[{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 1.2, "CurrencyCode": "Aud", "TotalBalance": 2.5},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Nzd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Sgd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xbt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Eth", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xrp", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Ada", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Dot", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Uni", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Link", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usdt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usdc", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Bch", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Ltc", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Mkr", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Dai", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Comp", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Snx", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Grt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Eos", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xlm", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Etc", "TotalBalance": 100.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Bat", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Pmgt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Yfi", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Aave", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Zrx", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Omg", "TotalBalance": 0.0}]""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
balances, msg = exchange.query_balances()
assert msg == ''
assert balances == {
A_AUD: Balance(amount=FVal(2.5), usd_value=FVal(3.75)),
A_ETC: Balance(amount=FVal(100), usd_value=FVal(150)),
}
def test_query_trade_history(function_scope_independentreserve):
"""Happy path test for independentreserve trade history querying"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """{"Data": [
{"AvgPrice": 603.7,
"CreatedTimestampUtc": "2017-11-22T22:54:40.3249401Z",
"FeePercent": 0.005,
"OrderGuid": "foo1",
"OrderType": "MarketOffer",
"Original": {"Outstanding": 0.0, "Volume": 0.5, "VolumeCurrencyType": "Primary"},
"Outstanding": 0.0,
"Price": null,
"PrimaryCurrencyCode": "Eth",
"SecondaryCurrencyCode": "Aud",
"Status": "Filled",
"Value": 301.85,
"Volume": 0.5
}, {
"AvgPrice": 257.25,
"CreatedTimestampUtc": "2017-07-28T09:39:19.8799244Z",
"FeePercent": 0.005,
"OrderGuid": "foo2",
"OrderType": "MarketBid",
"Original": {"Outstanding": 0.0, "Volume": 2.64117379, "VolumeCurrencyType": "Primary"},
"Outstanding": 0.0,
"Price": null,
"PrimaryCurrencyCode": "Eth",
"SecondaryCurrencyCode": "Aud",
"Status": "Filled",
"Value": 679.44,
"Volume": 2.64117379
}],
"PageSize": 50,
"TotalItems": 2,
"TotalPages": 1}
""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
trades = exchange.query_trade_history(
start_ts=0,
end_ts=1565732120,
only_cache=False,
)
expected_trades = [
Trade(
timestamp=1501234760,
location=Location.INDEPENDENTRESERVE,
base_asset=A_ETH,
quote_asset=A_AUD,
trade_type=TradeType.BUY,
amount=FVal('2.64117379'),
rate=FVal('257.25'),
fee=FVal('0.01320586895'),
fee_currency=A_ETH,
link='foo2',
), Trade(
timestamp=1511391280,
location=Location.INDEPENDENTRESERVE,
base_asset=A_ETH,
quote_asset=A_AUD,
trade_type=TradeType.SELL,
amount=FVal('0.5'),
rate=FVal('603.7'),
fee=FVal('0.0025'),
fee_currency=A_ETH,
link='foo1',
)]
assert trades == expected_trades[::-1]
# TODO: Make a test for asset movements.
# Would need more mocking as it would require mocking of multiple calls
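The tests above all lean on the same patch.object(..., side_effect=...) idiom. Here is a self-contained sketch of that idiom; `Session` is a stand-in class for illustration, not the rotkehlchen session.

from unittest.mock import patch


class Session:
    def request(self, method, url):
        raise RuntimeError("would hit the network")


def fake_request(method, url):  # mirrors the real method's signature
    assert method == 'post'
    return '{"ok": true}'


session = Session()
with patch.object(session, 'request', side_effect=fake_request):
    # the patched method forwards to fake_request and returns its result
    assert session.request('post', 'https://example.com') == '{"ok": true}'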
| 57.758772
| 148
| 0.659124
| 1,334
| 13,169
| 6.434033
| 0.176912
| 0.03437
| 0.182454
| 0.222999
| 0.766865
| 0.756146
| 0.748456
| 0.748456
| 0.725154
| 0.700454
| 0
| 0.056086
| 0.156504
| 13,169
| 227
| 149
| 58.013216
| 0.716601
| 0.038272
| 0
| 0.316583
| 0
| 0.301508
| 0.687184
| 0.028797
| 0
| 0
| 0
| 0.004405
| 0.060302
| 1
| 0.040201
| false
| 0
| 0.050251
| 0
| 0.105528
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ec6ca07ae28fa9514436d5f6b4c22e00552e3ad8
| 8,548
|
py
|
Python
|
pepdb/core/migrations/0073_auto_20160213_0335.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 7
|
2015-12-21T03:52:46.000Z
|
2020-07-24T19:17:23.000Z
|
pepdb/core/migrations/0073_auto_20160213_0335.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 12
|
2016-03-05T18:11:05.000Z
|
2021-06-17T20:20:03.000Z
|
pepdb/core/migrations/0073_auto_20160213_0335.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 4
|
2016-07-17T20:19:38.000Z
|
2021-03-23T12:47:20.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20160210_0246'),
]
operations = [
migrations.AddField(
model_name='declaration',
name='office_en',
field=models.CharField(max_length=512, null=True, verbose_name='\u0412\u0456\u0434\u043e\u043c\u0441\u0442\u0432\u043e', blank=True),
),
migrations.AddField(
model_name='declaration',
name='office_uk',
field=models.CharField(max_length=512, null=True, verbose_name='\u0412\u0456\u0434\u043e\u043c\u0441\u0442\u0432\u043e', blank=True),
),
migrations.AddField(
model_name='declaration',
name='position_en',
field=models.CharField(max_length=512, null=True, verbose_name='\u041f\u043e\u0441\u0430\u0434\u0430', blank=True),
),
migrations.AddField(
model_name='declaration',
name='position_uk',
field=models.CharField(max_length=512, null=True, verbose_name='\u041f\u043e\u0441\u0430\u0434\u0430', blank=True),
),
migrations.AddField(
model_name='declaration',
name='region_en',
field=models.CharField(max_length=50, null=True, verbose_name='\u0420\u0435\u0433\u0456\u043e\u043d', blank=True),
),
migrations.AddField(
model_name='declaration',
name='region_uk',
field=models.CharField(max_length=50, null=True, verbose_name='\u0420\u0435\u0433\u0456\u043e\u043d', blank=True),
),
migrations.AlterField(
model_name='company2company',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2company',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2company',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='company2country',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2company',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2country',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='date_confirmed_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='date_established_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
migrations.AlterField(
model_name='person2person',
name='date_finished_details',
field=models.IntegerField(default=0, verbose_name='\u0442\u043e\u0447\u043d\u0456\u0441\u0442\u044c', choices=[(0, '\u0422\u043e\u0447\u043d\u0430 \u0434\u0430\u0442\u0430'), (1, '\u0420\u0456\u043a \u0442\u0430 \u043c\u0456\u0441\u044f\u0446\u044c'), (2, '\u0422\u0456\u043b\u044c\u043a\u0438 \u0440\u0456\u043a')]),
),
]
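The \uXXXX escapes in the verbose_name and choices strings above are Ukrainian labels; decoding them (a quick sketch) makes the field semantics readable.

print('\u0412\u0456\u0434\u043e\u043c\u0441\u0442\u0432\u043e')  # Відомство ("agency")
print('\u041f\u043e\u0441\u0430\u0434\u0430')                    # Посада ("position")
print('\u0420\u0435\u0433\u0456\u043e\u043d')                    # Регіон ("region")
# date-precision choices: Точна дата / Рік та місяць / Тільки рік
# ("exact date" / "year and month" / "year only")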
| 71.233333
| 329
| 0.665185
| 1,042
| 8,548
| 5.368522
| 0.076775
| 0.053629
| 0.080443
| 0.077762
| 0.972471
| 0.972471
| 0.971755
| 0.956024
| 0.956024
| 0.945299
| 0
| 0.321524
| 0.167876
| 8,548
| 119
| 330
| 71.831933
| 0.464923
| 0.002457
| 0
| 0.876106
| 0
| 0.132743
| 0.509208
| 0.377713
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017699
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ec85a43e295e9f1e4d5dd54199fd0570655d1988
| 73,658
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/region_instance_group_manager.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/compute/region_instance_group_manager.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/compute/region_instance_group_manager.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RegionInstanceGroupManagerArgs', 'RegionInstanceGroupManager']
@pulumi.input_type
class RegionInstanceGroupManagerArgs:
def __init__(__self__, *,
base_instance_name: pulumi.Input[str],
versions: pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]],
auto_healing_policies: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RegionInstanceGroupManager resource.
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
pulumi.set(__self__, "base_instance_name", base_instance_name)
pulumi.set(__self__, "versions", versions)
if auto_healing_policies is not None:
pulumi.set(__self__, "auto_healing_policies", auto_healing_policies)
if description is not None:
pulumi.set(__self__, "description", description)
if distribution_policy_target_shape is not None:
pulumi.set(__self__, "distribution_policy_target_shape", distribution_policy_target_shape)
if distribution_policy_zones is not None:
pulumi.set(__self__, "distribution_policy_zones", distribution_policy_zones)
if name is not None:
pulumi.set(__self__, "name", name)
if named_ports is not None:
pulumi.set(__self__, "named_ports", named_ports)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if stateful_disks is not None:
pulumi.set(__self__, "stateful_disks", stateful_disks)
if target_pools is not None:
pulumi.set(__self__, "target_pools", target_pools)
if target_size is not None:
pulumi.set(__self__, "target_size", target_size)
if update_policy is not None:
pulumi.set(__self__, "update_policy", update_policy)
if wait_for_instances is not None:
pulumi.set(__self__, "wait_for_instances", wait_for_instances)
if wait_for_instances_status is not None:
pulumi.set(__self__, "wait_for_instances_status", wait_for_instances_status)
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> pulumi.Input[str]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@base_instance_name.setter
def base_instance_name(self, value: pulumi.Input[str]):
pulumi.set(self, "base_instance_name", value)
@property
@pulumi.getter
def versions(self) -> pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@versions.setter
def versions(self, value: pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]):
pulumi.set(self, "versions", value)
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@auto_healing_policies.setter
def auto_healing_policies(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]):
pulumi.set(self, "auto_healing_policies", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> Optional[pulumi.Input[str]]:
"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@distribution_policy_target_shape.setter
def distribution_policy_target_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "distribution_policy_target_shape", value)
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@distribution_policy_zones.setter
def distribution_policy_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "distribution_policy_zones", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
- Version name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@named_ports.setter
def named_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]):
pulumi.set(self, "named_ports", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]:
"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@stateful_disks.setter
def stateful_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]):
pulumi.set(self, "stateful_disks", value)
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@target_pools.setter
def target_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "target_pools", value)
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> Optional[pulumi.Input[int]]:
"""
- The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
"""
return pulumi.get(self, "target_size")
@target_size.setter
def target_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_size", value)
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]:
"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""
return pulumi.get(self, "update_policy")
@update_policy.setter
def update_policy(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]):
pulumi.set(self, "update_policy", value)
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@wait_for_instances.setter
def wait_for_instances(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_instances", value)
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> Optional[pulumi.Input[str]]:
"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
return pulumi.get(self, "wait_for_instances_status")
@wait_for_instances_status.setter
def wait_for_instances_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_for_instances_status", value)
@pulumi.input_type
class _RegionInstanceGroupManagerState:
def __init__(__self__, *,
auto_healing_policies: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
instance_group: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]] = None,
statuses: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RegionInstanceGroupManager resources.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
if auto_healing_policies is not None:
pulumi.set(__self__, "auto_healing_policies", auto_healing_policies)
if base_instance_name is not None:
pulumi.set(__self__, "base_instance_name", base_instance_name)
if description is not None:
pulumi.set(__self__, "description", description)
if distribution_policy_target_shape is not None:
pulumi.set(__self__, "distribution_policy_target_shape", distribution_policy_target_shape)
if distribution_policy_zones is not None:
pulumi.set(__self__, "distribution_policy_zones", distribution_policy_zones)
if fingerprint is not None:
pulumi.set(__self__, "fingerprint", fingerprint)
if instance_group is not None:
pulumi.set(__self__, "instance_group", instance_group)
if name is not None:
pulumi.set(__self__, "name", name)
if named_ports is not None:
pulumi.set(__self__, "named_ports", named_ports)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
if stateful_disks is not None:
pulumi.set(__self__, "stateful_disks", stateful_disks)
if statuses is not None:
pulumi.set(__self__, "statuses", statuses)
if target_pools is not None:
pulumi.set(__self__, "target_pools", target_pools)
if target_size is not None:
pulumi.set(__self__, "target_size", target_size)
if update_policy is not None:
pulumi.set(__self__, "update_policy", update_policy)
if versions is not None:
pulumi.set(__self__, "versions", versions)
if wait_for_instances is not None:
pulumi.set(__self__, "wait_for_instances", wait_for_instances)
if wait_for_instances_status is not None:
pulumi.set(__self__, "wait_for_instances_status", wait_for_instances_status)
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@auto_healing_policies.setter
def auto_healing_policies(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]):
pulumi.set(self, "auto_healing_policies", value)
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> Optional[pulumi.Input[str]]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@base_instance_name.setter
def base_instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_instance_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> Optional[pulumi.Input[str]]:
"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@distribution_policy_target_shape.setter
def distribution_policy_target_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "distribution_policy_target_shape", value)
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@distribution_policy_zones.setter
def distribution_policy_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "distribution_policy_zones", value)
@property
@pulumi.getter
def fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
The fingerprint of the instance group manager.
"""
return pulumi.get(self, "fingerprint")
@fingerprint.setter
def fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fingerprint", value)
@property
@pulumi.getter(name="instanceGroup")
def instance_group(self) -> Optional[pulumi.Input[str]]:
"""
The full URL of the instance group created by the manager.
"""
return pulumi.get(self, "instance_group")
@instance_group.setter
def instance_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_group", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        The name of the instance group manager.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@named_ports.setter
def named_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]):
pulumi.set(self, "named_ports", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the created resource.
"""
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]:
"""
        Disks created on the instances that are preserved on instance delete, update, etc. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross-zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers; this can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@stateful_disks.setter
def stateful_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]):
pulumi.set(self, "stateful_disks", value)
@property
@pulumi.getter
def statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]:
"""
The status of this managed instance group.
"""
return pulumi.get(self, "statuses")
@statuses.setter
def statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]):
pulumi.set(self, "statuses", value)
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@target_pools.setter
def target_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "target_pools", value)
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> Optional[pulumi.Input[int]]:
"""
        The target number of running instances for this managed instance group.
"""
return pulumi.get(self, "target_size")
@target_size.setter
def target_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_size", value)
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]:
"""
        The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and the [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch).
"""
return pulumi.get(self, "update_policy")
@update_policy.setter
def update_policy(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]):
pulumi.set(self, "update_policy", value)
@property
@pulumi.getter
def versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@versions.setter
def versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]):
pulumi.set(self, "versions", value)
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@wait_for_instances.setter
def wait_for_instances(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_instances", value)
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> Optional[pulumi.Input[str]]:
"""
        When used with `wait_for_instances`, it specifies the status to wait for.
        When `STABLE` is specified, this resource will wait until the instances are stable before returning. When `UPDATED` is
        set, it will wait for the version target to be reached and any per-instance configs to be effective, as well as all
        instances to be stable, before returning. The possible values are `STABLE` and `UPDATED`.
"""
return pulumi.get(self, "wait_for_instances_status")
@wait_for_instances_status.setter
def wait_for_instances_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_for_instances_status", value)
class RegionInstanceGroupManager(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
        :param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that are preserved on instance delete, update, etc. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross-zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers; this can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
        :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group.
        :param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and the [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
        :param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances`, it specifies the status to wait for.
               When `STABLE` is specified, this resource will wait until the instances are stable before returning. When `UPDATED` is
               set, it will wait for the version target to be reached and any per-instance configs to be effective, as well as all
               instances to be stable, before returning. The possible values are `STABLE` and `UPDATED`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegionInstanceGroupManagerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param RegionInstanceGroupManagerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegionInstanceGroupManagerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegionInstanceGroupManagerArgs.__new__(RegionInstanceGroupManagerArgs)
__props__.__dict__["auto_healing_policies"] = auto_healing_policies
if base_instance_name is None and not opts.urn:
raise TypeError("Missing required property 'base_instance_name'")
__props__.__dict__["base_instance_name"] = base_instance_name
__props__.__dict__["description"] = description
__props__.__dict__["distribution_policy_target_shape"] = distribution_policy_target_shape
__props__.__dict__["distribution_policy_zones"] = distribution_policy_zones
__props__.__dict__["name"] = name
__props__.__dict__["named_ports"] = named_ports
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["stateful_disks"] = stateful_disks
__props__.__dict__["target_pools"] = target_pools
__props__.__dict__["target_size"] = target_size
__props__.__dict__["update_policy"] = update_policy
if versions is None and not opts.urn:
raise TypeError("Missing required property 'versions'")
__props__.__dict__["versions"] = versions
__props__.__dict__["wait_for_instances"] = wait_for_instances
__props__.__dict__["wait_for_instances_status"] = wait_for_instances_status
__props__.__dict__["fingerprint"] = None
__props__.__dict__["instance_group"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["statuses"] = None
super(RegionInstanceGroupManager, __self__).__init__(
'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
instance_group: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None) -> 'RegionInstanceGroupManager':
"""
Get an existing RegionInstanceGroupManager resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
        :param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that are preserved on instance delete, update, etc. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross-zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers; this can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
        :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group.
        :param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and the [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
        :param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances`, it specifies the status to wait for.
               When `STABLE` is specified, this resource will wait until the instances are stable before returning. When `UPDATED` is
               set, it will wait for the version target to be reached and any per-instance configs to be effective, as well as all
               instances to be stable, before returning. The possible values are `STABLE` and `UPDATED`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegionInstanceGroupManagerState.__new__(_RegionInstanceGroupManagerState)
__props__.__dict__["auto_healing_policies"] = auto_healing_policies
__props__.__dict__["base_instance_name"] = base_instance_name
__props__.__dict__["description"] = description
__props__.__dict__["distribution_policy_target_shape"] = distribution_policy_target_shape
__props__.__dict__["distribution_policy_zones"] = distribution_policy_zones
__props__.__dict__["fingerprint"] = fingerprint
__props__.__dict__["instance_group"] = instance_group
__props__.__dict__["name"] = name
__props__.__dict__["named_ports"] = named_ports
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["self_link"] = self_link
__props__.__dict__["stateful_disks"] = stateful_disks
__props__.__dict__["statuses"] = statuses
__props__.__dict__["target_pools"] = target_pools
__props__.__dict__["target_size"] = target_size
__props__.__dict__["update_policy"] = update_policy
__props__.__dict__["versions"] = versions
__props__.__dict__["wait_for_instances"] = wait_for_instances
__props__.__dict__["wait_for_instances_status"] = wait_for_instances_status
return RegionInstanceGroupManager(resource_name, opts=opts, __props__=__props__)
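    # A minimal sketch (project/region/name values are hypothetical) of adopting
    # an existing regional MIG via the static `get` method documented above:
    #
    #   existing = gcp.compute.RegionInstanceGroupManager.get(
    #       "imported-appserver",
    #       id="projects/my-project/regions/us-central1/instanceGroupManagers/appserver-igm")
    #   pulumi.export("igmSelfLink", existing.self_link)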
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> pulumi.Output[Optional['outputs.RegionInstanceGroupManagerAutoHealingPolicies']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> pulumi.Output[str]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> pulumi.Output[str]:
"""
        The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> pulumi.Output[Sequence[str]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
"""
The fingerprint of the instance group manager.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter(name="instanceGroup")
def instance_group(self) -> pulumi.Output[str]:
"""
The full URL of the instance group created by the manager.
"""
return pulumi.get(self, "instance_group")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
        The name of the instance group manager.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerNamedPort']]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
The URL of the created resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerStatefulDisk']]]:
"""
        Disks created on the instances that are preserved on instance delete, update, etc. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross-zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers; this can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@property
@pulumi.getter
def statuses(self) -> pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerStatus']]:
"""
The status of this managed instance group.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> pulumi.Output[int]:
"""
        The target number of running instances for this managed instance group.
"""
return pulumi.get(self, "target_size")
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> pulumi.Output['outputs.RegionInstanceGroupManagerUpdatePolicy']:
"""
        The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and the [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch).
"""
return pulumi.get(self, "update_policy")
@property
@pulumi.getter
def versions(self) -> pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerVersion']]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> pulumi.Output[Optional[str]]:
"""
        When used with `wait_for_instances`, it specifies the status to wait for.
        When `STABLE` is specified, this resource will wait until the instances are stable before returning. When `UPDATED` is
        set, it will wait for the version target to be reached and any per-instance configs to be effective, as well as all
        instances to be stable, before returning. The possible values are `STABLE` and `UPDATED`.
"""
return pulumi.get(self, "wait_for_instances_status")
| 58.644904
| 578
| 0.695362
| 8,431
| 73,658
| 5.912347
| 0.044479
| 0.067747
| 0.0587
| 0.031777
| 0.946918
| 0.937729
| 0.926114
| 0.919955
| 0.911449
| 0.895179
| 0
| 0.0019
| 0.213921
| 73,658
| 1,255
| 579
| 58.691633
| 0.859001
| 0.485582
| 0
| 0.809444
| 1
| 0
| 0.167389
| 0.106528
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166948
| false
| 0.001686
| 0.011804
| 0
| 0.279933
| 0.021922
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ec8e3ac68d0e1ab6d9a5e9aaa3d5b118b2ebf02b
| 259
|
py
|
Python
|
03_GraphBasedPlanner/graph_ltpl/offline_graph/src/__init__.py
|
f1tenth/ESweek2021_educationclassA3
|
7620a36d21c1824efba8a83f0671926bf8e028f3
|
[
"MIT"
] | 15
|
2021-10-09T13:48:49.000Z
|
2022-03-27T04:36:44.000Z
|
03_GraphBasedPlanner/graph_ltpl/offline_graph/src/__init__.py
|
yinflight/ESweek2021_educationclassA3
|
7a32bacdb7f3154a773d28b6b6abffdaa154a526
|
[
"MIT"
] | 1
|
2021-11-27T01:47:25.000Z
|
2021-11-27T02:44:04.000Z
|
03_GraphBasedPlanner/graph_ltpl/offline_graph/src/__init__.py
|
yinflight/ESweek2021_educationclassA3
|
7a32bacdb7f3154a773d28b6b6abffdaa154a526
|
[
"MIT"
] | 2
|
2021-11-03T19:32:55.000Z
|
2021-11-27T02:43:13.000Z
|
import graph_ltpl.offline_graph.src.gen_edges
import graph_ltpl.offline_graph.src.gen_node_skeleton
import graph_ltpl.offline_graph.src.gen_offline_cost
import graph_ltpl.offline_graph.src.main_offline_callback
import graph_ltpl.offline_graph.src.prune_graph
| 43.166667
| 57
| 0.903475
| 43
| 259
| 5.023256
| 0.302326
| 0.25463
| 0.347222
| 0.509259
| 0.736111
| 0.736111
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0.03861
| 259
| 5
| 58
| 51.8
| 0.86747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
eca3a791b5ba3aeb2559468375ed11393b715dab
| 75
|
py
|
Python
|
tests/t37.py
|
jplevyak/pyc
|
9f4bc49be78ba29427841460945ce63826fcd857
|
[
"BSD-3-Clause"
] | 3
|
2019-08-21T22:01:35.000Z
|
2021-07-25T00:21:28.000Z
|
tests/t37.py
|
jplevyak/pyc
|
9f4bc49be78ba29427841460945ce63826fcd857
|
[
"BSD-3-Clause"
] | null | null | null |
tests/t37.py
|
jplevyak/pyc
|
9f4bc49be78ba29427841460945ce63826fcd857
|
[
"BSD-3-Clause"
] | null | null | null |
a = (1, "asdf", 2.0)
a = (2, "fdsa", 3.0)
print(a[0])
print(a[1])
print(a[2])
| 12.5
| 20
| 0.493333
| 19
| 75
| 1.947368
| 0.421053
| 0.486486
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 0.213333
| 75
| 5
| 21
| 15
| 0.474576
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.6
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
ecc45f44644fd0ea60f0ea35ec46ed2f1b8b0a35
| 2,144
|
py
|
Python
|
epoch_generate_particles_files/save_data.py
|
georgeholt1/epoch-generate-particles-files
|
c4810712e0b616545409829a2e6ab1364f468b18
|
[
"MIT"
] | null | null | null |
epoch_generate_particles_files/save_data.py
|
georgeholt1/epoch-generate-particles-files
|
c4810712e0b616545409829a2e6ab1364f468b18
|
[
"MIT"
] | null | null | null |
epoch_generate_particles_files/save_data.py
|
georgeholt1/epoch-generate-particles-files
|
c4810712e0b616545409829a2e6ab1364f468b18
|
[
"MIT"
] | null | null | null |
# Author: George K. Holt
# License: MIT
# Version: 0.1
"""
Part of EPOCH Generate Particles Files.
Functions to save the generated data.
"""
import numpy as np
import os
def save_1d(x_list, w_list, out_dir):
'''Save 1D particle data.
Parameters
----------
x_list : list
List of x-coordinate values.
w_list : list
List of weight values.
out_dir : str
Path to output directory.
'''
with open(os.path.join(out_dir, 'x_data.dat'), 'wb') as f:
f.write(np.array(x_list).tobytes())
with open(os.path.join(out_dir, 'w_data.dat'), 'wb') as f:
f.write(np.array(w_list).tobytes())
def save_2d(x_list, y_list, w_list, out_dir):
'''Save 2D particle data.
Parameters
----------
x_list : list
List of x-coordinate values.
y_list : list
List of y-coordinate values.
w_list : list
List of weight values.
out_dir : str
Path to output directory.
'''
with open(os.path.join(out_dir, 'x_data.dat'), 'wb') as f:
f.write(np.array(x_list).tobytes())
with open(os.path.join(out_dir, 'y_data.dat'), 'wb') as f:
f.write(np.array(y_list).tobytes())
with open(os.path.join(out_dir, 'w_data.dat'), 'wb') as f:
f.write(np.array(w_list).tobytes())
def save_3d(x_list, y_list, z_list, w_list, out_dir):
'''Save 3D particle data.
Parameters
----------
x_list : list
List of x-coordinate values.
y_list : list
List of y-coordinate values.
z_list : list
List of z-coordinate values.
w_list : list
List of weight values.
out_dir : str
Path to output directory.
'''
with open(os.path.join(out_dir, 'x_data.dat'), 'wb') as f:
f.write(np.array(x_list).tobytes())
with open(os.path.join(out_dir, 'y_data.dat'), 'wb') as f:
f.write(np.array(y_list).tobytes())
with open(os.path.join(out_dir, 'z_data.dat'), 'wb') as f:
f.write(np.array(z_list).tobytes())
with open(os.path.join(out_dir, 'w_data.dat'), 'wb') as f:
f.write(np.array(w_list).tobytes())
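# A minimal usage sketch (not part of the original module; the output location
# is illustrative). The writers above dump raw float64 bytes via numpy's
# tobytes(), so the data round-trips with numpy.frombuffer:
if __name__ == "__main__":
    import tempfile
    out_dir = tempfile.mkdtemp()
    save_1d([0.0, 1.0, 2.0], [1.0, 1.0, 1.0], out_dir)
    with open(os.path.join(out_dir, 'x_data.dat'), 'rb') as f:
        x = np.frombuffer(f.read())  # default dtype float64 matches tobytes()
    assert x.tolist() == [0.0, 1.0, 2.0]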
| 28.586667
| 62
| 0.593284
| 345
| 2,144
| 3.530435
| 0.15942
| 0.118227
| 0.08867
| 0.103448
| 0.853859
| 0.853859
| 0.807061
| 0.807061
| 0.807061
| 0.786535
| 0
| 0.005019
| 0.25653
| 2,144
| 75
| 63
| 28.586667
| 0.759097
| 0.374534
| 0
| 0.695652
| 1
| 0
| 0.092308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
01ac7d8f081e9a5d26928d46d03c73aa9a9b575d
| 2,698
|
py
|
Python
|
tests/sentry/coreapi/test_auth_from_request.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 4
|
2019-05-27T13:55:07.000Z
|
2021-03-30T07:05:09.000Z
|
tests/sentry/coreapi/test_auth_from_request.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 196
|
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
tests/sentry/coreapi/test_auth_from_request.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 1
|
2020-08-10T07:55:40.000Z
|
2020-08-10T07:55:40.000Z
|
from __future__ import absolute_import
import mock
import pytest
from django.core.exceptions import SuspiciousOperation
from sentry.coreapi import ClientAuthHelper, APIUnauthorized
def test_valid():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_missing_space():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_ignore_case():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'SeNtRy sentry_key=value, biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_invalid_header_defers_to_GET():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_legacy_header_defers_to_GET():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_AUTHORIZATION': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_header_bad_token():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentryfoo'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_invalid_header_missing_pair():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry foo'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_invalid_malformed_value():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,,biz=baz'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_multiple_auth_suspicious():
helper = ClientAuthHelper()
request = mock.Mock()
request.GET = {'sentry_version': '1', 'foo': 'bar'}
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
with pytest.raises(SuspiciousOperation):
helper.auth_from_request(request)
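# A distilled sketch (hypothetical key values), mirroring the passing cases
# above: the X-Sentry-Auth header is a case-insensitive "Sentry" prefix
# followed by comma-separated key=value pairs, with the `sentry_` prefix
# stripped from attribute names (sentry_key -> public_key).
def _example_auth_from_header():
    helper = ClientAuthHelper()
    request = mock.Mock()
    request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=abc123, sentry_version=7'}
    request.GET = {}
    auth = helper.auth_from_request(request)
    return auth.public_key, auth.version  # expected: ('abc123', '7')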
| 29.977778
| 77
| 0.702372
| 325
| 2,698
| 5.550769
| 0.169231
| 0.034922
| 0.144678
| 0.164634
| 0.817627
| 0.802106
| 0.781596
| 0.764967
| 0.764967
| 0.764967
| 0
| 0.002244
| 0.174203
| 2,698
| 89
| 78
| 30.314607
| 0.807451
| 0
| 0
| 0.720588
| 0
| 0
| 0.160119
| 0.018162
| 0
| 0
| 0
| 0
| 0.073529
| 1
| 0.132353
| false
| 0
| 0.073529
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
01ad28c5f9860e754e7ef1bd51c1217ab17e12ba
| 10,939
|
py
|
Python
|
tests/connect_tests.py
|
adyekjaer/VersionOne.SDK.Python
|
e83764be029019c2ee3229157ae873f22c17476f
|
[
"BSD-3-Clause"
] | 2
|
2018-08-05T18:44:42.000Z
|
2018-12-12T00:54:58.000Z
|
tests/connect_tests.py
|
adyekjaer/VersionOne.SDK.Python
|
e83764be029019c2ee3229157ae873f22c17476f
|
[
"BSD-3-Clause"
] | 25
|
2018-06-13T01:03:30.000Z
|
2019-12-22T01:49:01.000Z
|
tests/connect_tests.py
|
adyekjaer/VersionOne.SDK.Python
|
e83764be029019c2ee3229157ae873f22c17476f
|
[
"BSD-3-Clause"
] | 5
|
2018-08-23T08:43:43.000Z
|
2021-03-22T07:25:14.000Z
|
from testtools import TestCase
from testtools.assertions import assert_that
from testtools.matchers import Equals
from testtools.content import text_content
import sys
if sys.version_info >= (3,0):
from urllib.error import HTTPError
else:
from urllib2 import HTTPError
# try the standard-library version first, then fall back to the old standalone elementtree package
try:
from xml.etree import ElementTree
from xml.etree.ElementTree import parse, fromstring, Element
except ImportError:
from elementtree import ElementTree
from elementtree.ElementTree import parse, fromstring, Element
from v1pysdk.client import *
from v1pysdk import V1Meta
from .common_test_server import PublicTestServerConnection
class TestV1Connection(TestCase):
def test_connect(self):
username = PublicTestServerConnection.username
password = PublicTestServerConnection.password
address = PublicTestServerConnection.address
instance = PublicTestServerConnection.instance
self.addDetail('URL', text_content(address + "/" + instance))
self.addDetail('username', text_content(username))
server = V1Server(address=address, username=username, password=password,instance=instance)
# The story names, but limit to only the first result so we don't get inundated with results
code, body = server.fetch('/rest-1.v1/Data/Story?sel=Name&page=1,0')
self.addDetail('Code', text_content(str(code)))
self.addDetail('Body', text_content(str(body)))
elem = fromstring(body)
self.assertThat(elem.tag, Equals('Assets'))
def test_meta_connect_instance_url(self):
v1 = None
self.addDetail('URL', text_content(PublicTestServerConnection.instance_url))
self.addDetail('username', text_content(PublicTestServerConnection.username))
try:
v1 = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
username = PublicTestServerConnection.username,
password = PublicTestServerConnection.password,
)
except Exception as e:
assert_that(False, Equals(True), message="Error trying to create connection: " + str(e))
try:
items = v1.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from connection: " + str(e))
def test_meta_connect_instance_and_address(self):
v1 = None
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
self.addDetail('username', text_content(PublicTestServerConnection.username))
try:
v1 = V1Meta(
address = PublicTestServerConnection.address,
instance = PublicTestServerConnection.instance,
username = PublicTestServerConnection.username,
password = PublicTestServerConnection.password,
)
except Exception as e:
assert_that(False, Equals(True), message="Error trying to create connection: " + str(e))
try:
items = v1.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from connection: " + str(e))
def test_meta_connect_instance_url_overrides_separate(self):
v1 = None
address = self.getUniqueString() #garbage
instance = self.getUniqueString() #garbage
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance-url', text_content(PublicTestServerConnection.instance_url))
self.addDetail('instance', text_content(address))
self.addDetail('username', text_content(instance))
try:
v1 = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
address = address,
instance = instance,
username = PublicTestServerConnection.username,
password = PublicTestServerConnection.password,
)
except Exception as e:
assert_that(False, Equals(True), message="Error trying to create connection: " + str(e))
try:
items = v1.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from connection: " + str(e))
def test_meta_connect_oauth(self):
v1 = None
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
try:
v1 = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
#no username
password = PublicTestServerConnection.token,
use_password_as_token=True,
)
except Exception as e:
assert_that(False, Equals(True), message="Error trying to create connection: " + str(e))
try:
items = v1.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from connection: " + str(e))
def test_meta_connect_oauth_ignores_username(self):
v1 = None
username = self.getUniqueString() #garbage
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
self.addDetail('username', text_content(username))
try:
v1 = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
username = username,
password = PublicTestServerConnection.token,
use_password_as_token=True,
)
except Exception as e:
assert_that(False, Equals(True), message="Error trying to create connection: " + str(e))
try:
items = v1.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from connection: " + str(e))
def test_connect_fails_when_invalid(self):
v1bad = None
username = self.getUniqueString() #garbage
password = self.getUniqueString() #garbage
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
self.addDetail('bad-username', text_content(username))
self.addDetail('bad-password', text_content(password))
try:
v1bad = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
username = username,
password = password,
use_password_as_token=False,
)
# we have to try to use it to get it to connect and fail
items = v1bad.Story.select('Name').page(size=1)
items.first() #run the query
except HTTPError as e:
assert_that(e.code, Equals(401), message="Connection failed for reasons other than authorization")
else:
assert_that(False, Equals(True), message="Connection succeeded with bad credentials")
def test_reconnect_succeeds_after_invalid(self):
v1bad = None
username = self.getUniqueString() #garbage
password = self.getUniqueString() #garbage
self.addDetail('bad-username', text_content(username))
self.addDetail('bad-password', text_content(password))
try:
v1bad = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
username = username,
password = password,
use_password_as_token=False,
)
items = v1bad.Story.select('Name').page(size=1)
items.first() #run the query
except HTTPError as e:
assert_that(e.code, Equals(401), message="Connection failed for reasons other than authorization")
else:
assert_that(False, Equals(True), message="First connection succeeded with bad credentials, cannot continue test")
v1good = None
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
# Connect correctly first
try:
v1good = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
password = PublicTestServerConnection.token,
use_password_as_token=True,
)
items = v1good.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from good connection: " + str(e))
def test_reconnect_fails_when_invalid(self):
v1good = None
self.addDetail('address', text_content(PublicTestServerConnection.address))
self.addDetail('instance', text_content(PublicTestServerConnection.instance))
# Connect correctly first
try:
v1good = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
password = PublicTestServerConnection.token,
use_password_as_token=True,
)
items = v1good.Story.select('Name').page(size=1)
items.first() #run the query
except Exception as e:
assert_that(False, Equals(True), message="Error running query from good connection, cannot perform test: " + str(e))
v1bad = None
username = self.getUniqueString() #garbage
password = self.getUniqueString() #garbage
self.addDetail('bad-username', text_content(username))
self.addDetail('bad-password', text_content(password))
try:
v1bad = V1Meta(
instance_url = PublicTestServerConnection.instance_url,
username = username,
password = password,
use_password_as_token=False,
)
items = v1bad.Story.select('Name').page(size=1)
items.first() #run the query
except HTTPError as e:
assert_that(e.code, Equals(401), message="Connection failed for reasons other than authorization")
else:
assert_that(False, Equals(True), message="Second connection succeeded with bad credentials")
| 43.756
| 128
| 0.642472
| 1,109
| 10,939
| 6.220018
| 0.127142
| 0.049435
| 0.091186
| 0.028269
| 0.835605
| 0.790229
| 0.750797
| 0.743839
| 0.727167
| 0.708901
| 0
| 0.008886
| 0.269586
| 10,939
| 249
| 129
| 43.931727
| 0.854443
| 0.040863
| 0
| 0.70892
| 0
| 0
| 0.10644
| 0.003726
| 0
| 0
| 0
| 0
| 0.093897
| 1
| 0.042254
| false
| 0.117371
| 0.070423
| 0
| 0.117371
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
01f0612a28845af33cf6f66db33f77494cd79876
| 7,264
|
py
|
Python
|
metaflow/tests/flows/joins.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | 1
|
2022-01-07T22:32:27.000Z
|
2022-01-07T22:32:27.000Z
|
metaflow/tests/flows/joins.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | null | null | null |
metaflow/tests/flows/joins.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | null | null | null |
from metaflow import FlowSpec
from metaflow import api as ma
from metaflow.api import foreach, step, join
class OldJoinFlow1(FlowSpec):
@step
def start(self):
self.next(self.generate_ints)
@step
def generate_ints(self):
self.ints = list(range(1, 16))
self.next(self.test_prime, foreach='ints')
@step
def test_prime(self):
n = self.input
self.is_prime = n >= 2
i = 2
while i*i <= n:
if n % i == 0:
self.is_prime = False
break
i += 1
self.next(self.fizzbuzz)
@step
def fizzbuzz(self):
n = self.input
self.n = n
if n % 15 == 0:
self.fb = 'fizzbuzz'
elif n % 3 == 0:
self.fb = 'fizz'
elif n % 5 == 0:
self.fb = 'buzz'
self.next(self.join)
@step
def join(self, branches):
self.results = [
{
'n': branch.n,
'is_prime': branch.is_prime,
**({ 'fizzbuzz': branch.fb } if hasattr(branch, 'fb') else {})
}
for branch in branches
]
self.next(self.end)
@step
def end(self):
pass
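# Illustrative expected artifact (comment only, derived from the code above):
# after OldJoinFlow1 runs, `results` holds one dict per foreach branch, e.g.
#   {'n': 3, 'is_prime': True, 'fizzbuzz': 'fizz'}
# and entries whose n is divisible by neither 3 nor 5 omit the 'fizzbuzz' key.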
class NewJoinFlow1(ma.FlowSpec):
@step
def generate_ints(self):
self.ints = list(range(1, 16))
@foreach('ints')
def test_prime(self, n):
self.n = n
self.is_prime = n >= 2
i = 2
while i*i <= n:
if n % i == 0:
self.is_prime = False
break
i += 1
@step
def fizzbuzz(self):
n = self.input
if n % 15 == 0:
self.fb = 'fizzbuzz'
elif n % 3 == 0:
self.fb = 'fizz'
elif n % 5 == 0:
self.fb = 'buzz'
@join('fizzbuzz')
def join(self, branches):
self.results = [
{
'n': branch.n,
'is_prime': branch.is_prime,
**({ 'fizzbuzz': branch.fb } if hasattr(branch, 'fb') else {})
}
for branch in branches
]
class OldJoinFlow2(FlowSpec):
@step
def start(self):
self.next(self.generate_ints)
@step
def generate_ints(self):
self.ints = list(range(1, 16))
self.next(self.test_prime, foreach='ints')
@step
def test_prime(self):
n = self.input
self.is_prime = n >= 2
i = 2
while i*i <= n:
if n % i == 0:
self.is_prime = False
break
i += 1
self.next(self.fizzbuzz)
@step
def fizzbuzz(self):
n = self.input
self.n = n
if n % 15 == 0:
self.fb = 'fizzbuzz'
elif n % 3 == 0:
self.fb = 'fizz'
elif n % 5 == 0:
self.fb = 'buzz'
self.next(self.join)
@step
def join(self, branches):
self.results = [
{
'n': branch.n,
'is_prime': branch.is_prime,
**({ 'fizzbuzz': branch.fb } if hasattr(branch, 'fb') else {})
}
for branch in branches
]
self.next(self.filter_odds)
@step
def filter_odds(self):
self.odds = [ r for r in self.results if r['n'] % 2 == 1 ]
self.next(self.end)
@step
def end(self):
pass
class NewJoinFlow2(ma.FlowSpec):
@step
def generate_ints(self):
self.ints = list(range(1, 16))
@foreach('ints')
def test_prime(self):
n = self.input
self.n = n
self.is_prime = n >= 2
i = 2
while i*i <= n:
if n % i == 0:
self.is_prime = False
break
i += 1
@step
def fizzbuzz(self):
n = self.input
if n % 15 == 0:
self.fb = 'fizzbuzz'
elif n % 3 == 0:
self.fb = 'fizz'
elif n % 5 == 0:
self.fb = 'buzz'
@join('fizzbuzz')
def join(self, branches):
self.results = [
{
'n': branch.n,
'is_prime': branch.is_prime,
**({ 'fizzbuzz': branch.fb } if hasattr(branch, 'fb') else {})
}
for branch in branches
]
@step
def filter_odds(self):
self.odds = [ r for r in self.results if r['n'] % 2 == 1 ]
class OldForeachSplitAnd(FlowSpec):
@step
def start(self):
self.items = [1,2,3,4]
self.next(self.foreach, foreach='items')
@step
def foreach(self):
n = self.input
self.n = n
self.n2 = n*n
self.next(self.f1, self.f2)
@step
def f1(self):
self.n3 = self.n * self.n2
self.next(self.f3)
@step
def f2(self):
self.n4 = self.n2 * self.n2
self.next(self.f3)
@step
def f3(self, inputs):
assert not hasattr(self, 'n2')
assert not hasattr(self, 'n3')
assert not hasattr(self, 'n4')
self.merge_artifacts(inputs)
n = self.n
assert (n, self.n2, self.n3, self.n4) == (n, n**2, n**3, n**4)
self.n5 = self.n2 * self.n3
self.next(self.join_foreach)
@step
def join_foreach(self, inputs):
assert not hasattr(self, 'items')
assert not hasattr(self, 'n')
assert not hasattr(self, 'n2')
assert not hasattr(self, 'n3')
assert not hasattr(self, 'n4')
self.s = sum(input.n for input in inputs)
self.s2 = sum(input.n2 for input in inputs)
self.s3 = sum(input.n3 for input in inputs)
self.s4 = sum(input.n4 for input in inputs)
self.s5 = sum(input.n5 for input in inputs)
self.next(self.end)
@step
def end(self):
assert not hasattr(self, 'items')
assert (self.s, self.s2, self.s3, self.s4, self.s5,) == (10, 30, 100, 354, 1300,)
class NewForeachSplitAnd(ma.FlowSpec):
@step
def start(self):
self.items = [1,2,3,4]
@foreach('items')
def foreach(self, n):
self.n = n
self.n2 = n*n
@step('foreach')
def f1(self):
self.n3 = self.n * self.n2
@step('foreach')
def f2(self):
self.n4 = self.n2 * self.n2
@join('f1','f2')
def f3(self, inputs):
assert not hasattr(self, 'n2')
assert not hasattr(self, 'n3')
assert not hasattr(self, 'n4')
self.merge_artifacts(inputs)
n = self.n
assert (n, self.n2, self.n3, self.n4) == (n, n**2, n**3, n**4)
self.n5 = self.n2 * self.n3
@join
def join_foreach(self, inputs):
assert not hasattr(self, 'items')
assert not hasattr(self, 'n')
assert not hasattr(self, 'n2')
assert not hasattr(self, 'n3')
assert not hasattr(self, 'n4')
self.s = sum(input.n for input in inputs)
self.s2 = sum(input.n2 for input in inputs)
self.s3 = sum(input.n3 for input in inputs)
self.s4 = sum(input.n4 for input in inputs)
self.s5 = sum(input.n5 for input in inputs)
@step
def end(self):
assert not hasattr(self, 'items')
assert (self.s, self.s2, self.s3, self.s4, self.s5,) == (10, 30, 100, 354, 1300,)
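# --- Hedged usage note (not in the original module) --------------------------
# A stock Metaflow FlowSpec is executed by instantiating it at module scope,
# e.g. `python joins.py run`. Whether the forked `metaflow.api` flows above
# accept the same entry point is an assumption, so only a classic flow is
# wired up here:
if __name__ == '__main__':
    OldJoinFlow1()  # classic entry point; run with `python joins.py run`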
| 24.791809
| 89
| 0.488161
| 961
| 7,264
| 3.652445
| 0.087409
| 0.053846
| 0.082051
| 0.102564
| 0.905698
| 0.89886
| 0.897436
| 0.892308
| 0.869516
| 0.839316
| 0
| 0.039547
| 0.380369
| 7,264
| 292
| 90
| 24.876712
| 0.74028
| 0
| 0
| 0.876
| 1
| 0
| 0.034141
| 0
| 0
| 0
| 0
| 0
| 0.088
| 1
| 0.144
| false
| 0.008
| 0.012
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
01fb8b53d22fdf00f38594de4a2d3cc035949ebb
| 47,890
|
py
|
Python
|
test/integration/ggrc/converters/test_import_issuetracked_objects.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
test/integration/ggrc/converters/test_import_issuetracked_objects.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/converters/test_import_issuetracked_objects.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for IssueTracker updates via import cases."""
# pylint: disable=invalid-name,too-many-public-methods,too-many-lines
import collections
import ddt
import mock
from ggrc import db
from ggrc import models
from ggrc import settings
from ggrc.converters import errors
from ggrc.converters.handlers import issue_tracker
from ggrc.integrations import constants
from ggrc.integrations import issuetracker_bulk_sync
from ggrc.integrations.constants import DEFAULT_ISSUETRACKER_VALUES as \
default_values
from ggrc.models import all_models
from integration import ggrc
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
def expected_warning_for_default(line, column_name, alias):
"""Generate expected warning message"""
if alias in ("Severity", "Issue Type", "Priority"):
return errors.WRONG_VALUE_DEFAULT_CUSTOM.format(
line=line,
column_name=alias,
default_value=constants.DEFAULT_ISSUETRACKER_VALUES.get(column_name)
)
return errors.WRONG_VALUE_DEFAULT.format(line=line, column_name=alias)
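# Illustrative calls (comment only; the concrete default values shipped in
# DEFAULT_ISSUETRACKER_VALUES are an assumption here):
#   expected_warning_for_default(3, "issue_priority", "Priority")
#     -> WRONG_VALUE_DEFAULT_CUSTOM, mentioning the shipped default priority
#   expected_warning_for_default(3, "component_id", "Component ID")
#     -> plain WRONG_VALUE_DEFAULT for line 3, column "Component ID"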
@ddt.ddt
class TestIssueTrackedImport(ggrc.TestCase):
"""Test cases for IssueTracker integration via import."""
def setUp(self):
"""setUp"""
# pylint: disable=super-on-old-class
super(TestIssueTrackedImport, self).setUp()
self.api = Api()
self.client.get("/login")
self.patch_create_issue = mock.patch(
'ggrc.integrations.issues.Client.create_issue')
self.mock_create_issue = self.patch_create_issue.start()
self.patch_update_issue = mock.patch(
'ggrc.integrations.issues.Client.update_issue')
self.mock_update_issue = self.patch_update_issue.start()
def tearDown(self):
"""tearDown"""
self.patch_update_issue.stop()
self.patch_create_issue.stop()
@ddt.data(
("Issue", "Issue", "component_id", "Component ID", 123),
("Issue", "Issue", "hotlist_id", "Hotlist ID", 321),
("Issue", "Issue", "issue_priority", "Priority", "P1"),
("Issue", "Issue", "issue_severity", "Severity", "S1"),
("Issue", "Issue", "issue_type", "Issue Type", "PROCESS"),
("Issue", "Issue", "title", "Ticket Title", "iti_title"),
("Assessment", "Assessment", "component_id", "Component ID", 123),
("Assessment", "Assessment", "hotlist_id", "Hotlist ID", 321),
("Assessment", "Assessment", "issue_priority", "Priority", "P1"),
("Assessment", "Assessment", "issue_severity", "Severity", "S1"),
("Assessment", "Assessment", "issue_type", "Issue Type", "PROCESS"),
("Assessment", "Assessment", "title", "Ticket Title", "iti_title"),
("Audit", "Audit", "component_id", "Component ID", 123),
("Audit", "Audit", "hotlist_id", "Hotlist ID", 321),
("Audit", "Audit", "issue_priority", "Priority", "P1"),
("Audit", "Audit", "issue_severity", "Severity", "S1"),
("Audit", "Audit", "issue_type", "Issue Type", "PROCESS"),
("AssessmentTemplate", "Assessment Template", "component_id",
"Component ID", 123),
("AssessmentTemplate", "Assessment Template", "hotlist_id",
"Hotlist ID", 321),
("AssessmentTemplate", "Assessment Template", "issue_priority",
"Priority", "P1"),
("AssessmentTemplate", "Assessment Template", "issue_severity",
"Severity", "S1"),
("AssessmentTemplate", "Assessment Template", "issue_type",
"Issue Type", "PROCESS"),
)
@ddt.unpack
def test_import_update_succeed(self, model, model_name, field, alias, value):
# pylint: disable=too-many-arguments
"""Test {0} {2} set correctly during update via import."""
with factories.single_commit():
factory = factories.get_model_factory(model)
obj = factory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=obj,
)
response = self.import_data(collections.OrderedDict([
("object_type", model_name),
("Code*", obj.slug),
(alias, value),
]))
obj = models.get_model(model).query.one()
self._check_csv_response(response, {})
self.assertEqual(str(obj.issue_tracker[field]), str(value))
@ddt.data(
("component_id", "Component ID", 123),
("hotlist_id", "Hotlist ID", 321),
("issue_priority", "Priority", "P1"),
("issue_severity", "Severity", "S1"),
("issue_type", "Issue Type", "PROCESS"),
("title", "Ticket Title", "iti_title"),
)
@ddt.unpack
def test_issue_import_create_succeed(self, field, alias, value):
"""Test Issue {1} set correctly during create via import."""
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Object Title"),
("Due Date*", "2016-10-24T15:35:37"),
(alias, value),
]))
self._check_csv_response(response, {})
obj = all_models.Issue.query.one()
self.assertEqual(str(obj.issue_tracker[field]), str(value))
@ddt.data(
("component_id", "Component ID", 555),
("hotlist_id", "Hotlist ID", 444),
("issue_priority", "Priority", "P2"),
("issue_severity", "Severity", "S2"),
("issue_type", "Issue Type", "PROCESS"),
("title", "Ticket Title", "iti_title"),
)
@ddt.unpack
def test_assmt_import_create_succeed(self, field, alias, value):
"""Test Assessment {1} set correctly during create via import."""
audit = factories.AuditFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, {})
obj = all_models.Assessment.query.one()
self.assertEqual(str(obj.issue_tracker[field]), str(value))
@ddt.data(
("component_id", "Component ID", 555),
("hotlist_id", "Hotlist ID", 444),
("issue_priority", "Priority", "P2"),
("issue_severity", "Severity", "S2"),
("issue_type", "Issue Type", "PROCESS"),
)
@ddt.unpack
def test_assmt_tmpl_import_create_succeed(self, field, alias, value):
"""Test Assessment Template {1} set correctly during create via import."""
audit = factories.AuditFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees*", "user@example.com"),
("Default Assessment Type", "Control"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, {})
obj = all_models.AssessmentTemplate.query.one()
self.assertEqual(str(obj.issue_tracker[field]), str(value))
@ddt.data(
("issue_priority", "Priority", ""),
("issue_priority", "Priority", "P6"),
("issue_severity", "Severity", ""),
("issue_severity", "Severity", "aa"),
("issue_type", "Issue Type", ""),
("issue_type", "Issue Type", "PARABOLA"),
("issue_type", "Issue Type", "BUG"),
)
@ddt.unpack
def test_default_value_set_correctly(self, missed_field, alias, value):
"""Test correct default value was set if csv."{1}"={2!r} during import."""
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Issue": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Issue Title"),
("Due Date*", "2016-10-24T15:35:37"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
issue = all_models.Issue.query.one()
self.assertEqual(str(issue.issue_tracker[missed_field]),
str(default_values[missed_field]))
@ddt.data("", "aaa")
def test_default_hotlist_for_issue(self, value):
"""Test correct default hotlist was set to Issue during import."""
if value:
expected_warning = (
errors.WRONG_VALUE_DEFAULT.format(line=3, column_name="Hotlist ID")
)
expected_messages = {
"Issue": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Issue Title"),
("Hotlist ID", value),
("Due Date*", "2016-10-24T15:35:37"),
]))
self._check_csv_response(response, expected_messages)
issue = all_models.Issue.query.one()
self.assertEqual(str(issue.issue_tracker["hotlist_id"]),
str(default_values["issue_hotlist_id"]))
@ddt.data("", "aaa")
def test_default_component_for_issue(self, value):
"""Test correct default component was set to Issue during import."""
if value:
expected_warning = (
errors.WRONG_VALUE_DEFAULT.format(line=3, column_name="Component ID")
)
expected_messages = {
"Issue": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Issue Title"),
("Component ID", value),
("Due Date*", "2016-10-24T15:35:37"),
]))
self._check_csv_response(response, expected_messages)
issue = all_models.Issue.query.one()
self.assertEqual(str(issue.issue_tracker["component_id"]),
str(default_values["issue_component_id"]))
@ddt.data(
("component_id", "Component ID", ""),
("component_id", "Component ID", "sss"),
("hotlist_id", "Hotlist ID", ""),
("hotlist_id", "Hotlist ID", "aaa"),
("issue_priority", "Priority", ""),
("issue_priority", "Priority", "P6"),
("issue_severity", "Severity", ""),
("issue_severity", "Severity", "aa"),
("issue_type", "Issue Type", ""),
("issue_type", "Issue Type", "PARABOLA"),
)
@ddt.unpack
def test_audit_default_value_set_correctly(self, missed_field, alias, value):
"""Test correct default value was set to Audit {1} during import"""
program = factories.ProgramFactory()
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Audit": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
response = self.import_data(collections.OrderedDict([
("object_type", "Audit"),
("Code*", ""),
("Program", program.slug),
("Title", "Audit Title"),
("State", "Planned"),
("Audit Captains", "user@example.com"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
issue = all_models.Audit.query.one()
self.assertEqual(str(issue.issue_tracker[missed_field]),
str(default_values[missed_field]))
@ddt.data(
("component_id", "Component ID", 123),
("hotlist_id", "Hotlist ID", 321),
("issue_priority", "Priority", "P1"),
("issue_severity", "Severity", "S1"),
("issue_type", "Issue Type", "PROCESS"),
)
@ddt.unpack
def test_audit_import_create_succeed(self, field, alias, value):
"""Test Audit "{0}"={2} set correctly during create via import."""
program = factories.ProgramFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Audit"),
("Code*", ""),
("Program", program.slug),
("Title", "Audit Title"),
("State", "Planned"),
("Audit Captains", "user@example.com"),
(alias, value),
]))
self._check_csv_response(response, {})
obj = all_models.Audit.query.one()
self.assertEqual(str(obj.issue_tracker[field]), str(value))
@staticmethod
def _prepare_expected_import_resp(model_name, block_errors=(),
block_warnings=(), row_errors=(),
row_warnings=()):
"""Construct expected response message for import of specific model."""
if not any([block_errors, block_warnings, row_errors, row_warnings]):
return {}
return {
model_name: {
"block_errors": set(block_errors),
"block_warnings": set(block_warnings),
"row_errors": set(row_errors),
"row_warnings": set(row_warnings),
}
}
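# Illustrative return value (comment only, derived from the code above):
#   _prepare_expected_import_resp("Audit", row_warnings=["w1"]) ->
#     {"Audit": {"block_errors": set(), "block_warnings": set(),
#                "row_errors": set(), "row_warnings": {"w1"}}}
# and a call with no messages at all returns {}, which _check_csv_response
# treats as a clean import.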
@ddt.data(
("on", True, []),
("off", False, []),
(
"",
True,
[
errors.WRONG_VALUE_DEFAULT.format(
line=3, column_name="Sync people with Ticket Tracker")
],
),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_people_sync_audit_create(self, imported_value, expected_obj_value,
expected_warnings):
"""Test Audit people sync={0} set during create via import."""
program = factories.ProgramFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Audit"),
("Code*", ""),
("Program", program.slug),
("Title", "Audit Title"),
("State", "Planned"),
("Audit Captains", "user@example.com"),
("Ticket Tracker Integration", "on"),
("Sync people with Ticket Tracker", imported_value),
]))
expected_resp = self._prepare_expected_import_resp(
"Audit", row_warnings=expected_warnings
)
self._check_csv_response(response, expected_resp)
audit = all_models.Audit.query.one()
self.assertEqual(
audit.issue_tracker["people_sync_enabled"],
expected_obj_value,
)
@ddt.data(
(True, "on", True, []),
(True, "off", False, []),
(False, "on", True, []),
(False, "off", False, []),
(
True, "", True,
[
errors.WRONG_VALUE_DEFAULT.format(
line=3,
column_name="Sync people with Ticket Tracker",
)
],
),
(
False, "", True,
[
errors.WRONG_VALUE_DEFAULT.format(
line=3,
column_name="Sync people with Ticket Tracker",
)
],
),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_people_sync_audit_update(self, current_obj_value, imported_value,
expected_obj_value, expected_warnings):
"""Test Audit people sync={0} set during updated via import."""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
people_sync_enabled=current_obj_value,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Audit"),
("Code*", audit.slug),
("Sync people with Ticket Tracker", imported_value),
]))
expected_resp = self._prepare_expected_import_resp(
"Audit", row_warnings=expected_warnings
)
self._check_csv_response(response, expected_resp)
audit = all_models.Audit.query.one()
self.assertEqual(
audit.issue_tracker["people_sync_enabled"],
expected_obj_value,
)
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_bulk_create_from_import(self):
"""Test data was imported and tickets were updated using bulk mechanism."""
program = factories.ProgramFactory(title="program-1")
audit = factories.AuditFactory(title="Audit-1",
program=program)
assessment_data = [
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Assessment-1"),
])
]
self.import_data(*assessment_data)
assessment = all_models.Assessment.query.one()
assessment_slug = assessment.slug
issue_data = [
collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Issue Title"),
("Due Date*", "2019-11-20T15:35:37"),
])
]
response = self.import_data(*issue_data)
issue = all_models.Issue.query.one()
issue_slug = issue.slug
self._check_csv_response(response, {})
iti = all_models.IssuetrackerIssue
assmt_iti = iti.query.filter(iti.object_type == "Assessment").one()
assmt_iti.enabled = True
assmt_iti.title = ''
issue_iti = iti.query.filter(iti.object_type == "Issue").one()
issue_iti.enabled = True
issue_iti.issue_id = 123
db.session.commit()
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("code", assessment_slug),
("title", "Title1"),
]))
send_mock.assert_called_once()
self.mock_create_issue.assert_called_once()
with mock.patch(
"ggrc.integrations.issues.Client.update_issue"
) as update_mock:
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("code", issue_slug),
("priority", "P1"),
]))
send_mock.assert_called_once()
update_mock.assert_called_once()
@ddt.data(
("component_id", "Component ID", "", 123),
("component_id", "Component ID", "sss", 456),
("hotlist_id", "Hotlist ID", "", 789),
("hotlist_id", "Hotlist ID", "aaa", 589),
("issue_priority", "Priority", "", "P4"),
("issue_priority", "Priority", "P6", "P0"),
("issue_severity", "Severity", "", "S1"),
("issue_severity", "Severity", "aa", "S3"),
("issue_type", "Issue Type", "", "PROCESS"),
("issue_type", "Issue Type", "PARABOLA", "PROCESS"),
)
@ddt.unpack
def test_assmt_default_values_from_audit(self,
missed_field,
alias,
value,
audit_value):
"""Test correct default value was set from audit to {0}"""
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Assessment": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
with factories.single_commit():
audit = factories.AuditFactory()
iti = factories.IssueTrackerIssueFactory(issue_tracked_obj=audit)
setattr(iti, missed_field, audit_value)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Assessment.query.one()
self.assertEqual(str(obj.issue_tracker[missed_field]),
str(audit_value))
@ddt.data(
("component_id", "Component ID", ""),
("component_id", "Component ID", "sss"),
("hotlist_id", "Hotlist ID", ""),
("hotlist_id", "Hotlist ID", "aaa"),
("issue_priority", "Priority", ""),
("issue_priority", "Priority", "P6"),
("issue_severity", "Severity", ""),
("issue_severity", "Severity", "aa"),
("issue_type", "Issue Type", ""),
("issue_type", "Issue Type", "PARABOLA"),
)
@ddt.unpack
def test_assmt_default_values_from_default(self,
missed_field,
alias,
value):
"""Test correct default value was set to {0} if audit doesn't have one"""
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Assessment": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(issue_tracked_obj=audit)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Assessment.query.one()
self.assertEqual(str(obj.issue_tracker[missed_field]),
str(default_values[missed_field]))
@ddt.data(
("component_id", "Component ID", "", 123),
("component_id", "Component ID", "sss", 456),
("hotlist_id", "Hotlist ID", "", 789),
("hotlist_id", "Hotlist ID", "aaa", 589),
("issue_priority", "Priority", "", "P4"),
("issue_priority", "Priority", "P6", "P0"),
("issue_severity", "Severity", "", "S1"),
("issue_severity", "Severity", "aa", "S3"),
("issue_type", "Issue Type", "", "PROCESS"),
("issue_type", "Issue Type", "PARABOLA", "PROCESS"),
)
@ddt.unpack
def test_assmt_tmpl_default_values_from_audit(self,
missed_field,
alias,
value,
audit_value):
"""Test default value was set from audit to {0} for Assesment Template"""
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Assessment Template": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
with factories.single_commit():
audit = factories.AuditFactory()
iti = factories.IssueTrackerIssueFactory(issue_tracked_obj=audit)
setattr(iti, missed_field, audit_value)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees*", "user@example.com"),
("Default Assessment Type", "Control"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.AssessmentTemplate.query.one()
self.assertEqual(str(obj.issue_tracker[missed_field]),
str(audit_value))
@ddt.data(
("component_id", "Component ID", ""),
("component_id", "Component ID", "sss"),
("hotlist_id", "Hotlist ID", ""),
("hotlist_id", "Hotlist ID", "aaa"),
("issue_priority", "Priority", ""),
("issue_priority", "Priority", "P6"),
("issue_severity", "Severity", ""),
("issue_severity", "Severity", "aa"),
("issue_type", "Issue Type", ""),
("issue_type", "Issue Type", "PARABOLA"),
)
@ddt.unpack
def test_assmt_tmpl_default_values_from_default(self,
missed_field,
alias,
value):
"""Test default value was set to Assessment Template {0}"""
if value:
expected_warning = expected_warning_for_default(line=3,
column_name=missed_field,
alias=alias)
expected_messages = {
"Assessment Template": {
"row_warnings": {expected_warning},
}
}
else:
expected_messages = {}
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(issue_tracked_obj=audit)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees*", "user@example.com"),
("Default Assessment Type", "Control"),
("Title", "Object Title"),
(alias, value),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.AssessmentTemplate.query.one()
self.assertEqual(str(obj.issue_tracker[missed_field]),
str(default_values[missed_field]))
@ddt.data(
("component_id", "Component ID", "", 123),
("component_id", "Component ID", "sss", 123),
("component_id", "Component ID", None, 123),
("hotlist_id", "Hotlist ID", "", 123),
("hotlist_id", "Hotlist ID", "aaa", 123),
("hotlist_id", "Hotlist ID", None, 123),
("issue_priority", "Priority", "", "P4"),
("issue_priority", "Priority", "P6", "P0"),
("issue_priority", "Priority", None, "P0"),
("issue_severity", "Severity", "", "S1"),
("issue_severity", "Severity", "aa", "S3"),
("issue_severity", "Severity", None, "S3"),
("issue_type", "Issue Type", "", "PROCESS"),
("issue_type", "Issue Type", "PARABOLA", "PROCESS"),
("issue_type", "Issue Type", None, "PROCESS"),
("enabled", "Ticket Tracker Integration", "", True),
("enabled", "Ticket Tracker Integration", "aa", True),
("enabled", "Ticket Tracker Integration", None, True),
("enabled", "Ticket Tracker Integration", "", False),
("enabled", "Ticket Tracker Integration", "aa", False),
("enabled", "Ticket Tracker Integration", None, False),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_asmt_default_values_from_tmpl(self, field, alias, value,
tmpl_value):
"""Test set tmpl.{0}={3!r} if csv.{1!r}={2!r} and audit/app integr on"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit, enabled=True)
tmpl = factories.AssessmentTemplateFactory(audit=audit)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=tmpl, **{field: tmpl_value})
fields = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Template", tmpl.slug),
("Title", "Object Title"),
])
if value is not None:
fields[alias] = value
response = self.import_data(fields)
if value:
# ensure that warning is returned
expected_warning = expected_warning_for_default(line=3,
column_name=field,
alias=alias)
expected_messages = {"Assessment": {"row_warnings": {expected_warning}}}
self._check_csv_response(response, expected_messages)
obj = all_models.Assessment.query.one()
self.assertEqual(str(obj.issue_tracker[field]),
str(tmpl_value))
self.mock_create_issue.assert_not_called()
@ddt.ddt
class TestEnabledViaImport(TestIssueTrackedImport):
"""Test cases for integration status set correctly via import"""
def _assert_integration_state(self, obj, value):
"""Make assertion to check Ticket Tracker Integration field."""
expected_res = bool(value in
issue_tracker.IssueTrackerEnabledHandler.TRUE_VALUES)
self.assertEqual(bool(obj.issue_tracker["enabled"]),
expected_res)
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_assmt_generation_disallowed_wo_audit(self):
"""Test we can't turn integration On for Assessment w/o audit"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=False,
issue_id=None,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Object Title"),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, {})
assmt = all_models.Assessment.query.one()
self.assertFalse(assmt.issue_tracker["enabled"])
@ddt.data("Draft", "Active")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_generation_issue_allowed_on_update(self, status):
"""Test ticket generation allowed for Issue in {} status on update"""
with factories.single_commit():
obj = factories.IssueFactory(status=status)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=obj,
enabled=False,
issue_id=None,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", obj.slug),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, {})
obj = all_models.Issue.query.one()
self.assertTrue(obj.issue_tracker["enabled"])
self.mock_create_issue.assert_called_once()
@ddt.data("Draft", "Active")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_generation_issue_allowed_on_create(self, status):
"""Test ticket generation allowed for Issue in status={0} on create"""
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("State", status),
("Title", "Object Title"),
("Ticket Tracker Integration", "On"),
("Due Date*", "2016-10-24T15:35:37"),
]))
self._check_csv_response(response, {})
obj = all_models.Issue.query.one()
self.assertTrue(obj.issue_tracker["enabled"])
self.mock_create_issue.assert_called_once()
@ddt.data("Fixed", "Fixed and Verified", "Deprecated")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_ticket_generation_issue_disallowed_on_update(self, status):
"""Test ticket generation disallowed for Issue in {} status on update"""
with factories.single_commit():
obj = factories.IssueFactory(status=status)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=obj,
enabled=False,
issue_id=None,
)
expected_warning = (
errors.WRONG_ISSUE_TICKET_STATUS.format(
line=3,
column_name="Ticket Tracker Integration",
)
)
expected_messages = {
"Issue": {
"row_warnings": {expected_warning},
}
}
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", obj.slug),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Issue.query.one()
self.assertFalse(obj.issue_tracker["enabled"])
@ddt.data("Fixed", "Fixed and Verified", "Deprecated")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_ticket_generation_issue_disallowed_on_create(self, status):
"""Test ticket generation disallowed for Issue in {} status on create"""
expected_warning = (
errors.WRONG_ISSUE_TICKET_STATUS.format(
line=3,
column_name="Ticket Tracker Integration",
)
)
expected_messages = {
"Issue": {
"row_warnings": {expected_warning},
}
}
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Object Title"),
("State", status),
("Ticket Tracker Integration", "On"),
("Due Date*", "2016-10-24T15:35:37"),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Issue.query.one()
self.assertFalse(obj.issue_tracker["enabled"])
@ddt.data("on", "off")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_enabled_state_audit_create_succeed(self, value):
"""Test Audit integration={0} set correctly during create via import."""
program = factories.ProgramFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Audit"),
("Code*", ""),
("Program", program.slug),
("Title", "Audit Title"),
("State", "Planned"),
("Audit Captains", "user@example.com"),
("Ticket Tracker Integration", value),
]))
self._check_csv_response(response, {})
obj = all_models.Audit.query.one()
self._assert_integration_state(obj, value)
@ddt.data(
("Issue", "Issue"),
("Assessment", "Assessment"),
("Audit", "Audit"),
("AssessmentTemplate", "Assessment Template"),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_enabled_state_default_value(self, model, model_name):
"""Test correct default value was set to {0} enabled during import."""
factory = factories.get_model_factory(model)
obj = factory()
expected_warning = (
errors.WRONG_VALUE_DEFAULT.format(
line=3,
column_name="Ticket Tracker Integration",
)
)
expected_messages = {
model_name: {
"row_warnings": {expected_warning}
}
}
response = self.import_data(collections.OrderedDict([
("object_type", model_name),
("Code*", obj.slug),
("Ticket Tracker Integration", "test_value"),
]))
self._check_csv_response(response, expected_messages)
obj = models.get_model(model).query.one()
self.assertEqual(obj.issue_tracker["enabled"], False)
@ddt.data(
(True, "off", "off"),
(True, "on", "on"),
(False, "off", "off"),
(False, "on", "off"),
(None, "off", "off"),
(None, "on", "off"),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_enabled_state_assmt_tmpl_create_succeed(self, audit_value,
tmpl_value, expected):
"""Test Template set integr state={2} if audit integr={0} and csv={1}"""
audit = factories.AuditFactory()
if audit_value is not None:
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=audit_value,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", ""),
("Audit*", audit.slug),
("Default Assignees*", "user@example.com"),
("Default Assessment Type", "Control"),
("Title", "Object Title"),
("Ticket Tracker Integration", tmpl_value),
]))
self._check_csv_response(response, {})
obj = all_models.AssessmentTemplate.query.one()
self._assert_integration_state(obj, expected)
@ddt.data("on", "off")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_enabled_state_assmt_create_succeed(self, value):
"""Test Assessment integration state={0} set correctly during create."""
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Title", "Object Title"),
("Ticket Tracker Integration", value),
]))
self._check_csv_response(response, {})
obj = all_models.Assessment.query.one()
self._assert_integration_state(obj, value)
self.mock_create_issue.assert_not_called()
@ddt.data("on", "off")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_assmt_import_enabled_update_succeed(self, value):
"""Test Assmt integr state={0} set correctly when updated via import."""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
assmt = factories.AssessmentFactory(audit=audit)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assmt,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assmt.slug),
("Ticket Tracker Integration", value),
]))
obj = all_models.Assessment.query.one()
self._check_csv_response(response, {})
self._assert_integration_state(obj, value)
@ddt.data(
("Issue", "Issue", "on"),
("Issue", "Issue", "off"),
("Audit", "Audit", "on"),
("Audit", "Audit", "off"),
("AssessmentTemplate", "Assessment Template", "on"),
("AssessmentTemplate", "Assessment Template", "off"),
)
@ddt.unpack
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_import_enabled_update_succeed(self, model, model_name, value):
"""Test {0} integration state={2} set correctly when updated via import."""
with factories.single_commit():
factory = factories.get_model_factory(model)
obj = factory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=obj,
)
response = self.import_data(collections.OrderedDict([
("object_type", model_name),
("Code*", obj.slug),
("Ticket Tracker Integration", value),
]))
obj = models.get_model(model).query.one()
self._check_csv_response(response, {})
self._assert_integration_state(obj, value)
@ddt.data("on", "off")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_enabled_state_issue_create_succeed(self, value):
"""Test Issue integr state={0} set correctly during create via import."""
response = self.import_data(collections.OrderedDict([
("object_type", "Issue"),
("Code*", ""),
("Admin", "user@example.com"),
("Title", "Object Title"),
("Ticket Tracker Integration", value),
("Due Date*", "2016-10-24T15:35:37"),
]))
self._check_csv_response(response, {})
obj = all_models.Issue.query.one()
self._assert_integration_state(obj, value)
@ddt.data("In Progress", "Not Started", "Rework Needed")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_generation_assmt_allowed_on_update(self, status):
"""Test ticket generation allowed for Assessment in {} status on update"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
assmt = factories.AssessmentFactory(status=status, audit=audit)
person = factories.PersonFactory()
factories.AccessControlPersonFactory(
ac_list=assmt.acr_name_acl_map["Verifiers"],
person=person,
)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assmt,
enabled=False,
issue_id=None,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assmt.slug),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, {})
obj = all_models.Assessment.query.one()
self.assertTrue(obj.issue_tracker["enabled"])
self.mock_create_issue.assert_called_once()
@ddt.data("Completed", "In Review", "Deprecated", "Verified")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_generation_assmt_disallowed_on_update(self, status):
"""Test ticket generation disallowed for Assmt in {} status on update"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
assmt = factories.AssessmentFactory(status=status, audit=audit)
person = factories.PersonFactory()
factories.AccessControlPersonFactory(
ac_list=assmt.acr_name_acl_map["Verifiers"],
person=person,
)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assmt,
enabled=False,
issue_id=None,
)
expected_warning = (
errors.WRONG_ASSESSMENT_TICKET_STATUS.format(
line=3,
column_name="Ticket Tracker Integration",
)
)
expected_messages = {
"Assessment": {
"row_warnings": {expected_warning},
}
}
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assmt.slug),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Assessment.query.one()
self.assertFalse(obj.issue_tracker["enabled"])
@ddt.data("In Progress", "Not Started", "Rework Needed")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_ticket_generation_assmt_allowed_on_create(self, status):
"""Test ticket generation allowed for Assessment in {} status on create"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Verifiers", "user@example.com"),
("Title", "Object Title"),
("State", status),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, {})
obj = all_models.Assessment.query.one()
self.assertTrue(obj.issue_tracker["enabled"])
@ddt.data("Completed", "In Review", "Deprecated", "Verified")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_ticket_generation_assmt_disallowed_on_create(self, status):
"""Test ticket generation disallowed for Assmt in {} status on update"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
enabled=True,
)
expected_warning = (
errors.WRONG_ASSESSMENT_TICKET_STATUS.format(
line=3,
column_name="Ticket Tracker Integration",
)
)
expected_messages = {
"Assessment": {
"row_warnings": {expected_warning},
}
}
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "user@example.com"),
("Creators", "user@example.com"),
("Verifiers", "user@example.com"),
("Title", "Object Title"),
("State", status),
("Ticket Tracker Integration", "On"),
]))
self._check_csv_response(response, expected_messages)
obj = all_models.Assessment.query.one()
self.assertFalse(obj.issue_tracker["enabled"])
@ddt.ddt
@mock.patch("ggrc.integrations.issues.Client.create_issue")
@mock.patch("ggrc.integrations.issues.Client.update_issue")
class TestImportIssueTrackedNotif(ggrc.TestCase):
"""Test cases for notifications during import of IssueTracked objects."""
def setUp(self): # pylint: disable=missing-docstring
super(TestImportIssueTrackedNotif, self).setUp()
self.client.get("/login")
current_user = all_models.Person.query.filter_by(
email="user@example.com",
).first()
self.current_user_email = current_user.email
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_generate_it_issue_notif(self, *_):
"""Test email is sent if issue in issuetracker is created during import."""
with factories.single_commit():
assessment = factories.AssessmentFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assessment.audit,
enabled=True,
)
assessment_data = [
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("Ticket Tracker Integration", "On"),
]),
]
with mock.patch(
"ggrc.notifications.common.send_email",
) as mocked_send_email:
response = self.import_data(*assessment_data)
self._check_csv_response(response, {})
it_bulk_creator = issuetracker_bulk_sync.IssueTrackerBulkCreator
mocked_send_email.assert_called_with(
self.current_user_email,
it_bulk_creator.ISSUETRACKER_SYNC_TITLE,
settings.EMAIL_BULK_SYNC_SUCCEEDED.render(sync_data={
"title": it_bulk_creator.SUCCESS_TITLE.format(filename=""),
"email_text": it_bulk_creator.SUCCESS_TEXT,
}),
)
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_update_it_issue_notif(self, *_):
"""Test email is sent if issue in issuetracker is updated during import."""
with factories.single_commit():
assessment = factories.AssessmentFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assessment.audit,
enabled=True,
)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assessment,
enabled=True,
)
assessment_data = [
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
]),
]
with mock.patch(
"ggrc.notifications.common.send_email",
) as mocked_send_email:
response = self.import_data(*assessment_data)
self._check_csv_response(response, {})
it_bulk_updater = issuetracker_bulk_sync.IssueTrackerBulkUpdater
mocked_send_email.assert_called_with(
self.current_user_email,
it_bulk_updater.ISSUETRACKER_SYNC_TITLE,
settings.EMAIL_BULK_SYNC_SUCCEEDED.render(sync_data={
"title": it_bulk_updater.SUCCESS_TITLE.format(filename=""),
"email_text": it_bulk_updater.SUCCESS_TEXT,
}),
)
| 36.390578
| 79
| 0.605742
| 4,935
| 47,890
| 5.653495
| 0.060588
| 0.021935
| 0.019068
| 0.043584
| 0.829713
| 0.788602
| 0.772581
| 0.755914
| 0.733943
| 0.716165
| 0
| 0.008373
| 0.249363
| 47,890
| 1,315
| 80
| 36.418251
| 0.767748
| 0.062184
| 0
| 0.710095
| 0
| 0
| 0.202216
| 0.018485
| 0
| 0
| 0
| 0
| 0.038827
| 1
| 0.035375
| false
| 0
| 0.062985
| 0
| 0.1044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bf20b2641a1b11444ae0fd4623b65b55a1f7d7dc
| 97
|
py
|
Python
|
pandapower/shortcircuit/__init__.py
|
yougnen/pandapower
|
d206bd91e68dd03675f7fe8ddee141621ef437fc
|
[
"BSD-3-Clause"
] | 104
|
2017-02-21T17:13:51.000Z
|
2022-03-21T13:52:27.000Z
|
pandapower/shortcircuit/__init__.py
|
lvzhibai/pandapower
|
24ed3056558887cc89f67d15b5527523990ae9a1
|
[
"BSD-3-Clause"
] | 126
|
2017-02-15T17:09:08.000Z
|
2018-07-16T13:25:15.000Z
|
pandapower/shortcircuit/__init__.py
|
lvzhibai/pandapower
|
24ed3056558887cc89f67d15b5527523990ae9a1
|
[
"BSD-3-Clause"
] | 57
|
2017-03-08T13:49:32.000Z
|
2022-02-28T10:36:55.000Z
|
from pandapower.shortcircuit.calc_sc import calc_sc
from pandapower.shortcircuit.toolbox import *
| 48.5
| 51
| 0.876289
| 13
| 97
| 6.384615
| 0.538462
| 0.337349
| 0.626506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072165
| 97
| 2
| 52
| 48.5
| 0.922222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
17286817e8f2c4ec6931433f113b6f15705189e2
| 9,263
|
py
|
Python
|
aifo_simulation/java-code/projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/analyze.py
|
AlmondDust/Assignment3-Final-AIFO
|
b006b2090a7b597fde7f92e9d9fbf204bc3c993e
|
[
"Apache-2.0"
] | 21
|
2021-05-26T11:58:50.000Z
|
2022-03-29T12:46:28.000Z
|
aifo_simulation/java-code/projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/analyze.py
|
AlmondDust/Assignment3-Final-AIFO
|
b006b2090a7b597fde7f92e9d9fbf204bc3c993e
|
[
"Apache-2.0"
] | 1
|
2021-12-08T03:43:44.000Z
|
2021-12-08T03:43:44.000Z
|
aifo_simulation/java-code/projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/analyze.py
|
AlmondDust/Assignment3-Final-AIFO
|
b006b2090a7b597fde7f92e9d9fbf204bc3c993e
|
[
"Apache-2.0"
] | 4
|
2021-09-25T20:10:49.000Z
|
2022-03-14T06:39:26.000Z
|
#!/usr/bin/python
# This Python script extracts the data we want to plot from the simulation
# logs and writes it out in a format that gnuplot can render later on.
if __name__ == '__main__':
########################################################################################################################
# 99th-percentile flow completion time (ms) of flows < 100KB vs. load, pFabric web-search workload
lambdas = [3600, 5200, 7000, 8900, 11100, 14150, 19000]
FCTs = [[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0]]
row = 0
for x in lambdas:
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR0.02/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_99th_fct_ms" in line:
FCTs[row][1]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_99th_fct_ms" in line:
FCTs[row][2]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W100_SR0.1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_99th_fct_ms" in line:
FCTs[row][3]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W1000_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_99th_fct_ms" in line:
FCTs[row][5]=line.split("=")[1].split("\n")[0]
break
r.close()
row = row + 1
w = open('projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/pFabric_less_100KB_99th_fct_ms.dat', 'w')
w.write("# W20_SR0.02 W20_SR1 W100_SR0.1 W1000_SR1\n")
w.write("3600 %s %s %s %s \n" % (FCTs[0][1], FCTs[0][2], FCTs[0][3], FCTs[0][5]))
w.write("5200 %s %s %s %s \n" % (FCTs[1][1], FCTs[1][2], FCTs[1][3], FCTs[1][5]))
w.write("7000 %s %s %s %s \n" % (FCTs[2][1], FCTs[2][2], FCTs[2][3], FCTs[2][5]))
w.write("8900 %s %s %s %s \n" % (FCTs[3][1], FCTs[3][2], FCTs[3][3], FCTs[3][5]))
w.write("11100 %s %s %s %s \n" % (FCTs[4][1], FCTs[4][2], FCTs[4][3], FCTs[4][5]))
w.write("14150 %s %s %s %s \n" % (FCTs[5][1], FCTs[5][2], FCTs[5][3], FCTs[5][5]))
w.write("19000 %s %s %s %s \n" % (FCTs[6][1], FCTs[6][2], FCTs[6][3], FCTs[6][5]))
w.close()
########################################################################################################################
# Mean flow completion time (ms) of flows < 100KB vs. load, pFabric web-search workload
lambdas = [3600, 5200, 7000, 8900, 11100, 14150, 19000]
FCTs = [[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0]]
row = 0
for x in lambdas:
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR0.02/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_mean_fct_ms" in line:
FCTs[row][1]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_mean_fct_ms" in line:
FCTs[row][2]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W100_SR0.1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_mean_fct_ms" in line:
FCTs[row][3]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W1000_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "less_100KB_mean_fct_ms" in line:
FCTs[row][5]=line.split("=")[1].split("\n")[0]
break
r.close()
row = row + 1
w = open('projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/pFabric_less_100KB_mean_fct_ms.dat', 'w')
w.write("# W20_SR0.02 W20_SR1 W100_SR0.1 W1000_SR1\n")
w.write("3600 %s %s %s %s \n" % (FCTs[0][1], FCTs[0][2], FCTs[0][3], FCTs[0][5]))
w.write("5200 %s %s %s %s \n" % (FCTs[1][1], FCTs[1][2], FCTs[1][3], FCTs[1][5]))
w.write("7000 %s %s %s %s \n" % (FCTs[2][1], FCTs[2][2], FCTs[2][3], FCTs[2][5]))
w.write("8900 %s %s %s %s \n" % (FCTs[3][1], FCTs[3][2], FCTs[3][3], FCTs[3][5]))
w.write("11100 %s %s %s %s \n" % (FCTs[4][1], FCTs[4][2], FCTs[4][3], FCTs[4][5]))
w.write("14150 %s %s %s %s \n" % (FCTs[5][1], FCTs[5][2], FCTs[5][3], FCTs[5][5]))
w.write("19000 %s %s %s %s \n" % (FCTs[6][1], FCTs[6][2], FCTs[6][3], FCTs[6][5]))
w.close()
########################################################################################################################
# Mean flow completion time (ms) of flows >= 1MB vs. load, pFabric web-search workload
lambdas = [3600, 5200, 7000, 8900, 11100, 14150, 19000]
FCTs = [[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0]]
row = 0
for x in lambdas:
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR0.02/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "geq_1MB_mean_fct_ms" in line:
FCTs[row][1]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W20_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "geq_1MB_mean_fct_ms" in line:
FCTs[row][2]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W100_SR0.1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "geq_1MB_mean_fct_ms" in line:
FCTs[row][3]=line.split("=")[1].split("\n")[0]
break
r.close()
file = "temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"+str(x)+"/AIFO_W1000_SR1/analysis/flow_completion.statistics"
r = open(file, 'r')
lines = r.readlines()
for i, line in enumerate(lines):
if "geq_1MB_mean_fct_ms" in line:
FCTs[row][5]=line.split("=")[1].split("\n")[0]
break
r.close()
row = row + 1
w = open('projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/pFabric_geq_1MB_mean_fct_ms.dat', 'w')
w.write("# W20_SR0.02 W20_SR1 W100_SR0.1 W1000_SR1\n")
w.write("3600 %s %s %s %s \n" % (FCTs[0][1], FCTs[0][2], FCTs[0][3], FCTs[0][5]))
w.write("5200 %s %s %s %s \n" % (FCTs[1][1], FCTs[1][2], FCTs[1][3], FCTs[1][5]))
w.write("7000 %s %s %s %s \n" % (FCTs[2][1], FCTs[2][2], FCTs[2][3], FCTs[2][5]))
w.write("8900 %s %s %s %s \n" % (FCTs[3][1], FCTs[3][2], FCTs[3][3], FCTs[3][5]))
w.write("11100 %s %s %s %s \n" % (FCTs[4][1], FCTs[4][2], FCTs[4][3], FCTs[4][5]))
w.write("14150 %s %s %s %s \n" % (FCTs[5][1], FCTs[5][2], FCTs[5][3], FCTs[5][5]))
w.write("19000 %s %s %s %s \n" % (FCTs[6][1], FCTs[6][2], FCTs[6][3], FCTs[6][5]))
w.close()
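# --- Hedged refactor sketch (not in the original script; names are ours) ----
# The three sections above repeat one read-grep-write loop per metric key.
# Assuming the directory layout used above, the duplication could be factored
# into a single helper:
def extract_metric(metric_key, out_path, lambdas, configs):
    """Collect `metric_key` for every (lambda, config) pair into a gnuplot .dat file."""
    rows = []
    for lam in lambdas:
        values = []
        for config in configs:
            path = ("temp/aifo/aifo_evaluation/pFabric/web_search_workload_sample_rate/"
                    + str(lam) + "/" + config + "/analysis/flow_completion.statistics")
            value = "0"  # keep the original default when the key is absent
            with open(path) as handle:
                for line in handle:
                    if metric_key in line:
                        value = line.split("=")[1].strip()
                        break
            values.append(value)
        rows.append("%d %s \n" % (lam, " ".join(values)))
    with open(out_path, "w") as out:
        out.write("# " + " ".join(configs) + "\n")
        out.writelines(rows)

# Example mirroring the first section above:
# extract_metric("less_100KB_99th_fct_ms",
#                "projects/aifo/plots/aifo_evaluation/pFabric/web_search_workload_w_sr/pFabric_less_100KB_99th_fct_ms.dat",
#                [3600, 5200, 7000, 8900, 11100, 14150, 19000],
#                ["AIFO_W20_SR0.02", "AIFO_W20_SR1", "AIFO_W100_SR0.1", "AIFO_W1000_SR1"])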
| 45.856436 | 145 | 0.506423 | 1,416 | 9,263 | 3.175847 | 0.074153 | 0.054703 | 0.080053 | 0.104069 | 0.957972 | 0.955748 | 0.955748 | 0.955748 | 0.955748 | 0.955748 | 0 | 0.099911 | 0.270647 | 9,263 | 202 | 146 | 45.856436 | 0.565719 | 0.041023 | 0 | 0.968944 | 0 | 0 | 0.341592 | 0.221465 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.006211 | 0 | 0.006211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
17844c9f317b40ddfad8abcbbb3c641bf3b465fa | 91 | py | Python | tests/test_scm_pipeline.py | Forks-yugander-krishan-singh/jenkins-job-builder-pipeline | c8aac16b97eb89882e0a5a7250ad8ed33ca7ddd8 | ["Apache-2.0"] | null | null | null | tests/test_scm_pipeline.py | Forks-yugander-krishan-singh/jenkins-job-builder-pipeline | c8aac16b97eb89882e0a5a7250ad8ed33ca7ddd8 | ["Apache-2.0"] | null | null | null | tests/test_scm_pipeline.py | Forks-yugander-krishan-singh/jenkins-job-builder-pipeline | c8aac16b97eb89882e0a5a7250ad8ed33ca7ddd8 | ["Apache-2.0"] | null | null | null |
from base import assert_case
def test_script_pipeline():
assert_case('scm_pipeline')
| 15.166667 | 31 | 0.78022 | 13 | 91 | 5.076923 | 0.769231 | 0.30303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 91 | 5 | 32 | 18.2 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0.131868 | 0 | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
bd7c81d7603809e95fceec6361bdcc84ab909f41 | 202 | py | Python | selim_sef-solution/lucid/misc/io/__init__.py | Hulihrach/RoadDetector | 9fedd537d7d3a5c81a60562a185fc13370af9a99 | ["Apache-2.0"] | 4,537 | 2018-02-08T22:58:30.000Z | 2022-03-31T13:24:05.000Z | selim_sef-solution/lucid/misc/io/__init__.py | Hulihrach/RoadDetector | 9fedd537d7d3a5c81a60562a185fc13370af9a99 | ["Apache-2.0"] | 260 | 2018-02-08T22:06:50.000Z | 2022-03-24T18:05:09.000Z | selim_sef-solution/lucid/misc/io/__init__.py | Hulihrach/RoadDetector | 9fedd537d7d3a5c81a60562a185fc13370af9a99 | ["Apache-2.0"] | 636 | 2018-02-09T09:50:58.000Z | 2022-03-17T22:49:59.000Z |
from lucid.misc.io.showing import show
from lucid.misc.io.loading import load
from lucid.misc.io.saving import save, CaptureSaveContext, batch_save
from lucid.misc.io.scoping import io_scope, scope_url
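These re-exports let callers import the io helpers from one place. A minimal round trip might look like the following sketch (the URL and filename are placeholders, not taken from the source):

from lucid.misc.io import load, save, show

img = load("https://example.com/sample.png")  # placeholder URL
save(img, "sample_copy.png")                  # placeholder filename
show(img)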
| 40.4 | 69 | 0.831683 | 34 | 202 | 4.852941 | 0.470588 | 0.218182 | 0.315152 | 0.363636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094059 | 202 | 4 | 70 | 50.5 | 0.901639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
da76a40793f2f1422c13d0932a6d592e32d20c44 | 11,289 | py | Python | tzager/pdf_paper.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | ["MIT"] | 2 | 2021-01-25T17:05:59.000Z | 2021-04-11T19:05:16.000Z | tzager/pdf_paper.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | ["MIT"] | null | null | null | tzager/pdf_paper.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | ["MIT"] | null | null | null |
import json
import requests
def analysis(password, path, title):
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
    print('Converting pdf to text ...')
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password_pdf = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password_pdf, caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
full_text = text.replace('-\n', '').replace('’', "'")
fp.close()
device.close()
retstr.close()
text = text.replace('-\n', '').replace('’', "'")
lines = text.split('\n')
lines_section_ids_dict = {}
lines_section_ids = []
for i, line in enumerate(lines[1:-2]):
if len(lines[i-1]) == 0 and len(lines[i+1]) == 0 and len(lines[i]) > 3 and not str(lines[i]).isdigit():
lines_section_ids_dict[i] = lines[i]
lines_section_ids.append(i)
ref_id = -1
data = []
for id in lines_section_ids_dict:
data.append((lines_section_ids_dict[id], id))
data = dict(data)
final_data = {}
final_data['paper_title'] = title
final_data['full_text'] = full_text
try:
ref_id = data['References']
except KeyError:
ref_id = len(lines) - 1
for i, id in enumerate(lines_section_ids):
if i < len(lines_section_ids) - 1 and id < ref_id:
start = lines_section_ids[i]
end = lines_section_ids[i+1]
interval_lines = lines[start+1:end]
interval_lines_txt = ' '.join(interval_lines)
if interval_lines and len(interval_lines_txt) > 100:
final_data[lines_section_ids_dict[start]] = ' '.join(interval_lines)
print('Uploading text ...')
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/paper_analysis/' + password, json=json.dumps(final_data))
if response.status_code == 200:
data = dict(response.json())
else:
data = {'error': response.status_code}
data = dict(data)
return data
def scientific_analysis(password, path, title, topn):
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
    print('Converting pdf to text ...')
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password_pdf = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password_pdf, caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
    text = text.replace('-\n', '').replace('’', "'").replace('inﬂ', 'infl')  # the last replace expands the U+FB02 'ﬂ' ligature left by pdfminer
lines = text.split('\n')
lines_section_ids_dict = {}
lines_section_ids = []
for i, line in enumerate(lines[1:-2]):
if len(lines[i-1]) == 0 and len(lines[i+1]) == 0 and len(lines[i]) > 3 and not str(lines[i]).isdigit():
lines_section_ids_dict[i] = lines[i]
lines_section_ids.append(i)
data = []
for id in lines_section_ids_dict:
data.append((lines_section_ids_dict[id], id))
data = dict(data)
final_data = {}
new_txt = ''
try:
ref_id = data['References']
except KeyError:
ref_id = len(lines) - 1
for i, id in enumerate(lines_section_ids):
if i < len(lines_section_ids) - 1 and id < ref_id:
start = lines_section_ids[i]
end = lines_section_ids[i+1]
interval_lines = lines[start+1:end]
interval_lines_txt = ' '.join(interval_lines)
if 'Abbreviations' not in lines_section_ids_dict[start] and '18 of 36' not in lines_section_ids_dict[start]:
new_txt += interval_lines_txt
if interval_lines and len(interval_lines_txt) > 100:
final_data[lines_section_ids_dict[start]] = ' '.join(interval_lines)
final_data['paper_title'] = title
final_data['full_text'] = new_txt
final_data['topn'] = topn
print('Uploading text ...')
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/scientific_analysis/' + password, json=json.dumps(final_data))
if response.status_code == 200:
data = dict(response.json())
else:
data = {'error': response.status_code}
data = dict(data)
return data
def focus_on(password, pkey, entity):
final_data = {'password': password, 'pkey': pkey, 'entity': entity}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/focus_on', json=json.dumps(final_data))
if response.status_code == 200:
data = dict(response.json())
else:
data = {'error': response.status_code}
data = dict(data)
return data
def compare_papers(password, key1, key2, edges_1, edges_2, main_scope_1, main_scope_2):
final_data = {'password': password, 'key1': key1, 'key2': key2, 'edges_1': edges_1, 'edges_2': edges_2, 'main_scope_1': main_scope_1, 'main_scope_2': main_scope_2}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/compare_papers', json=json.dumps(final_data))
if response.status_code == 200:
data = dict(response.json())
else:
data = {'error': response.status_code}
data = dict(data)
return data
def directory_analysis(password, dir_path):
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
import glob
overall_data_to_return = []
all_pdfs_in_path = glob.glob(dir_path+'/*')
for ii, path in enumerate(all_pdfs_in_path):
title = path.replace(dir_path + '/', '').replace('.pdf', '')
        print('Converting pdf to text ...', ii+1, '/', len(all_pdfs_in_path))
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password_pdf = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password_pdf, caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
        text = text.replace('-\n', '').replace('’', "'").replace('inﬂ', 'infl')  # expand the U+FB02 'ﬂ' ligature left by pdfminer
lines = text.split('\n')
lines_section_ids_dict = {}
lines_section_ids = []
for i, line in enumerate(lines[1:-2]):
if len(lines[i-1]) == 0 and len(lines[i+1]) == 0 and len(lines[i]) > 3 and not str(lines[i]).isdigit():
lines_section_ids_dict[i] = lines[i]
lines_section_ids.append(i)
data = []
for id in lines_section_ids_dict:
data.append((lines_section_ids_dict[id], id))
data = dict(data)
final_data = {}
new_txt = ''
try:
ref_id = data['References']
except KeyError:
ref_id = len(lines) - 1
for i, id in enumerate(lines_section_ids):
if i < len(lines_section_ids) - 1 and id < ref_id:
start = lines_section_ids[i]
end = lines_section_ids[i+1]
interval_lines = lines[start+1:end]
interval_lines_txt = ' '.join(interval_lines)
if 'Abbreviations' not in lines_section_ids_dict[start] and '18 of 36' not in lines_section_ids_dict[start]:
new_txt += interval_lines_txt
if interval_lines and len(interval_lines_txt) > 100:
final_data[lines_section_ids_dict[start]] = ' '.join(interval_lines)
final_data['paper_title'] = title
final_data['full_text'] = new_txt
print('Uploading text ...', ii+1, '/', len(all_pdfs_in_path))
print()
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/directory_analysis/' + password, json=json.dumps(final_data))
if response.status_code == 200:
r_data = dict(response.json())
else:
r_data = {'error': response.status_code}
r_data = dict(r_data)
if 'paper_id' in r_data:
overall_data_to_return.append(r_data['paper_id'])
return overall_data_to_return
def directory_scopes(password, papers_ids):
final_data = {'papers_ids': papers_ids}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/directory_scopes/' + password, json=json.dumps(final_data))
if response.status_code == 200:
r_data = dict(response.json())
else:
r_data = {'error': response.status_code}
r_data = dict(r_data)
return r_data
def complementary_papers(password, papers_ids):
final_data = {'papers_ids': papers_ids}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/complementary_papers/' + password, json=json.dumps(final_data))
if response.status_code == 200:
r_data = dict(response.json())
else:
r_data = {'error': response.status_code}
r_data = dict(r_data)
return r_data
def intuition_connection(password, papers_ids, focus_on=None):
final_data = {'paper_ids': papers_ids, 'focus_on': focus_on}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/intuition_connection/' + password, json=json.dumps(final_data))
if response.status_code == 200:
r_data = dict(response.json())
else:
r_data = {'error': response.status_code}
r_data = dict(r_data)
return r_data
def intuition_mechanisms(password, papers_ids, focus_on=None):
final_data = {'paper_ids': papers_ids, 'focus_on': focus_on}
response = requests.post('http://tzagerlib1-env.eba-wjp8tqpj.eu-west-2.elasticbeanstalk.com/intuition_mechanisms/' + password, json=json.dumps(final_data))
if response.status_code == 200:
r_data = dict(response.json())
else:
r_data = {'error': response.status_code}
r_data = dict(r_data)
return r_data
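A minimal driver for the module above might look like this sketch; the password, PDF path, and title are placeholders, while analysis() and its error convention ({'error': status_code}) come directly from the code:

if __name__ == "__main__":
    # All three arguments are placeholders for illustration.
    result = analysis("my-password", "papers/example.pdf", "Example Paper")
    if "error" in result:
        print("request failed with HTTP status", result["error"])
    else:
        print("response keys:", list(result.keys()))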
| 39.75 | 167 | 0.642572 | 1,450 | 11,289 | 4.789655 | 0.095862 | 0.063931 | 0.079914 | 0.05198 | 0.900792 | 0.894744 | 0.893017 | 0.885817 | 0.879194 | 0.87329 | 0 | 0.015127 | 0.232882 | 11,289 | 283 | 168 | 39.890459 | 0.786605 | 0 | 0 | 0.844 | 0 | 0.036 | 0.113218 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036 | false | 0.096 | 0.072 | 0 | 0.144 | 0.028 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
da8f0055a77ed6fa4e5e1522444ff2ad408ea2b3 | 6,668 | gyp | Python | common-mk/external_dependencies.gyp | doitmovin/chromiumos-platform2 | 6462aaf43072307b5a40eb045a89e473381b5fda | ["BSD-3-Clause"] | null | null | null | common-mk/external_dependencies.gyp | doitmovin/chromiumos-platform2 | 6462aaf43072307b5a40eb045a89e473381b5fda | ["BSD-3-Clause"] | null | null | null | common-mk/external_dependencies.gyp | doitmovin/chromiumos-platform2 | 6462aaf43072307b5a40eb045a89e473381b5fda | ["BSD-3-Clause"] | 2 | 2021-01-26T12:37:19.000Z | 2021-05-18T13:37:57.000Z |
{
'targets': [
{
'target_name': 'modemmanager-dbus-proxies',
'type': 'none',
'variables': {
'xml2cpp_type': 'proxy',
'xml2cpp_in_dir': '<(sysroot)/usr/share/dbus-1/interfaces/',
'xml2cpp_out_dir': 'include/dbus_proxies',
},
'sources': [
'<(xml2cpp_in_dir)/mm-mobile-error.xml',
'<(xml2cpp_in_dir)/mm-serial-error.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Cdma.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Firmware.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Card.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Contacts.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Network.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.SMS.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Ussd.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Simple.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Bearer.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Location.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Modem3gpp.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.ModemCdma.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Simple.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Time.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Sim.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.xml',
],
'includes': ['xml2cpp.gypi'],
},
{
'target_name': 'modemmanager-dbus-adaptors',
'type': 'none',
'variables': {
'xml2cpp_type': 'adaptor',
'xml2cpp_in_dir': '<(sysroot)/usr/share/dbus-1/interfaces/',
'xml2cpp_out_dir': 'include/dbus_adaptors',
},
'sources': [
'<(xml2cpp_in_dir)/mm-mobile-error.xml',
'<(xml2cpp_in_dir)/mm-serial-error.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Cdma.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Firmware.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Card.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Contacts.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Network.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.SMS.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.Ussd.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Gsm.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.Simple.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.Modem.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Bearer.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Location.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Modem3gpp.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.ModemCdma.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Simple.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.Time.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Modem.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.Sim.xml',
'<(xml2cpp_in_dir)/org.freedesktop.ModemManager1.xml',
],
'includes': ['xml2cpp.gypi'],
},
{
'target_name': 'dbus-proxies',
'type': 'none',
'variables': {
'xml2cpp_type': 'proxy',
'xml2cpp_in_dir': '<(sysroot)/usr/share/dbus-1/interfaces/',
'xml2cpp_out_dir': 'include/dbus_proxies',
},
'sources': [
'<(xml2cpp_in_dir)/org.freedesktop.DBus.Properties.xml',
],
'includes': ['xml2cpp.gypi'],
},
{
'target_name': 'cloud_policy_proto_generator',
'type': 'none',
'hard_dependency': 1,
'variables': {
'policy_tools_dir': '<(sysroot)/usr/share/policy_tools',
'policy_resources_dir': '<(sysroot)/usr/share/policy_resources',
'proto_out_dir': '<(SHARED_INTERMEDIATE_DIR)/proto',
},
'actions': [{
'action_name': 'run_generate_script',
'inputs': [
'<(policy_tools_dir)/generate_policy_source.py',
'<(policy_resources_dir)/policy_templates.json',
'<(policy_resources_dir)/VERSION',
],
'outputs': [ '<(proto_out_dir)/cloud_policy.proto' ],
'action': [
'python', '<(policy_tools_dir)/generate_policy_source.py',
'--cloud-policy-protobuf=<(proto_out_dir)/cloud_policy.proto',
'<(policy_resources_dir)/VERSION',
'<(OS)',
'1', # chromeos-flag
'<(policy_resources_dir)/policy_templates.json',
],
}],
},
{
'target_name': 'policy-protos',
'type': 'static_library',
'variables': {
'proto_in_dir': '<(sysroot)/usr/include/proto',
'proto_out_dir': 'include/bindings',
},
'sources': [
'<(proto_in_dir)/chrome_device_policy.proto',
'<(proto_in_dir)/chrome_extension_policy.proto',
'<(proto_in_dir)/device_management_backend.proto',
'<(proto_in_dir)/device_management_local.proto',
],
'includes': ['protoc.gypi'],
},
{
'target_name': 'user_policy-protos',
'type': 'static_library',
'variables': {
'proto_in_dir': '<(SHARED_INTERMEDIATE_DIR)/proto',
'proto_out_dir': 'include/bindings',
},
'dependencies': [
'cloud_policy_proto_generator',
],
'sources': [
'<(proto_in_dir)/cloud_policy.proto',
],
'includes': ['protoc.gypi'],
},
{
'target_name': 'install_attributes-proto',
'type': 'static_library',
      # install_attributes-proto.a is used by a shared_library
# object, so we need to build it with '-fPIC' instead of '-fPIE'.
'cflags!': ['-fPIE'],
'cflags': ['-fPIC'],
'variables': {
'proto_in_dir': '<(sysroot)/usr/include/proto',
'proto_out_dir': 'include/bindings',
},
'sources': [
'<(proto_in_dir)/install_attributes.proto',
],
'includes': ['protoc.gypi'],
},
],
}
| 41.937107 | 80 | 0.626125 | 723 | 6,668 | 5.495159 | 0.152144 | 0.071734 | 0.144979 | 0.15857 | 0.840171 | 0.798137 | 0.717845 | 0.699723 | 0.699723 | 0.680342 | 0 | 0.015385 | 0.20066 | 6,668 | 158 | 81 | 42.202532 | 0.730019 | 0.019646 | 0 | 0.615385 | 0 | 0 | 0.707179 | 0.568651 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
16f24f5dceaff375273799b091cdaa31879fa455 | 12,182 | py | Python | task1.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | ["Unlicense"] | null | null | null | task1.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | ["Unlicense"] | null | null | null | task1.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | ["Unlicense"] | null | null | null |
import network2
import logging
import numpy as np
logging.basicConfig(level=logging.DEBUG)
# read in the data
# logging.info("READING IN DATA...")
# for reading in normal dataset
# training, validation, test = network2.load_data_wrapper("data/mnist.pkl.gz")
### I WILL ADD AND COMMENT OUT SECTIONS OF CODE BASED ON WHICH TASKS I AM TRYING TO EXECUTE FOR ANY GIVEN ITERATION ###
# Task 1 - Experimenting with BPNN
## Task 1.1 - Effect of cost function with default network structure [784, 10]
### - Quadratic cost function with sigmoid activation function; plot convergence
# logging.info("TASK 1.1 A...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_1a.csv", 'w')
#
# for _ in range(3):
# network = network2.Network([784, 10], cost=network2.QuadraticCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = "Iteration {}\n".format(_)
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}\n\n".format(results)
# f.write(buff)
# f.flush()
# f.close()
### - Cross entropy cost function with sigmoid activation function; plot convergence
# logging.info("TASK 1.1 B...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_1b.csv", 'w')
#
# for _ in range(3):
# network = network2.Network([784, 10], cost=network2.CrossEntropyCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = "Iteration {}\n".format(_)
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}\n\n".format(results)
# f.write(buff)
# f.flush()
#
# f.close()
### - Log-likelihood cost function with softmax activation function; plot convergence
# logging.info("TASK 1.1 C...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_1c.csv", 'w')
#
# for _ in range(3):
# network = network2.Network([784, 10], cost=network2.LogLikelihoodCost, output_activation=network2.SoftmaxActivation)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = "Iteration {}\n".format(_)
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}\n\n".format(results)
# f.write(buff)
# f.flush()
#
# f.close()
## Task 1.2 - Effect of regularization with default network structure [784, 10], no hidden layers, and cross entropy
### - Add L2 normalization on the cost function; plot convergence
# logging.info("TASK 1.2 A...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_2a.csv", 'w')
#
# for l2 in [0.01, 0.1, 1, 10]:
# network = network2.Network([784, 10], cost=network2.CrossEntropyCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, lmbda=l2, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = str(l2) + '\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}".format(results) + '\n\n'
# f.write(buff)
# f.flush()
#
# f.close()
### - Add L1 normalization on the cost function; plot convergence
# logging.info("TASK 1.2 B...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_2b.csv", 'w')
#
# for l1 in [0.01, 0.1, 1, 10]:
# network = network2.Network([784, 10], cost=network2.CrossEntropyCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, gmma=l1, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = str(l1) + '\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}".format(results) + '\n\n'
# f.write(buff)
# f.flush()
#
# f.close()
### - L1 normalization; expanded training set with affine transforms; plot convergence
# read in the data
logging.info("READING IN DATA...")
# for reading in normal dataset
training, validation, test = network2.load_data_wrapper("data/mnist_expanded.pkl.gz")
# logging.info("TASK 1.2 C...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_2c.csv", 'w')
#
# for _ in range(3):
# network = network2.Network([784, 10], cost=network2.CrossEntropyCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = network.SGD(training, 100, 100, 0.9, gmma=1, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# logging.info("WRITING RESULTS...")
# buff = "Iteration {}\n".format(_)
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}".format(results) + '\n\n'
# f.write(buff)
# f.flush()
#
# f.close()
## Task 1.3 - Effect of hidden layers; cross entropy; L1 normalization; expanded training set
### - Add one hidden layer with 30 nodes [784, 30, 10]; plot convergence
logging.info("TASK 1.3 A...")
logging.info("INITIALIZING NETWORK...")
f = open("task1_3a.csv", 'w')
for it in range(3):
    network = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
    logging.info("TRAINING NETWORK...")
    # Unpack the trailing weight-change list into its own name; reusing `_` here
    # would clobber the loop index used for the "Iteration" label below.
    evaluation_cost, evaluation_accuracy, training_cost, training_accuracy, weight_change = network.SGD(training, 100, 100, 0.9, gmma=1, evaluation_data=validation)
    logging.info("EVALUATING RESULTS...")
    results = network.accuracy(test)
    logging.info("WRITING RESULTS...")
    buff = "Iteration {}\n".format(it)
    f.write(buff)
    buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
    f.write(buff)
    buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
    f.write(buff)
    buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
    f.write(buff)
    buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
    f.write(buff)
    buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
    f.write(buff)
    buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
    f.write(buff)
    buff = "test_acc,{}".format(results) + '\n\n'
    f.write(buff)
    f.flush()
f.close()
### - Add two hidden layers with 30 nodes [784, 30, 30, 10]; plot convergence; plot change rate of each weight in hidden layers
# logging.info("TASK 1.3 B...")
# logging.info("INITIALIZING NETWORK...")
#
# f = open("task1_3b.csv", 'w')
#
# for _ in range(3):
# network = network2.Network([784, 30, 30, 10], cost=network2.CrossEntropyCost)
#
# logging.info("TRAINING NETWORK...")
# evaluation_cost, evaluation_accuracy, training_cost, training_accuracy, weight_change = network.SGD(training, 100, 100, 0.9, gmma=1, evaluation_data=validation)
#
# logging.info("EVALUATING RESULTS...")
# results = network.accuracy(test)
#
# weight_np = np.array(weight_change)
#
# logging.info("WRITING RESULTS...")
# buff = "Iteration {}\n".format(_)
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_cost," + ','.join([str(i) for i in evaluation_cost]) + '\n'
# f.write(buff)
# buff = "train_cost," + ','.join([str(i) for i in training_cost]) + '\n\n'
# f.write(buff)
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# buff = "eval_acc," + ','.join([str(i) for i in evaluation_accuracy]) + '\n'
# f.write(buff)
# buff = "train_acc," + ','.join([str(i) for i in training_accuracy]) + '\n\n'
# f.write(buff)
# buff = "test_acc,{}".format(results) + '\n\n'
# f.write(buff)
# f.flush()
#
# buff = "epoch," + ','.join([str(i) for i in range(100)]) + '\n'
# f.write(buff)
# for i in range(weight_np.shape[1]):
# buff = "w_change_{},".format(i) + ','.join([str(j) for j in weight_change[:][i]]) + '\n'
# f.write(buff)
#
# f.flush()
#
# f.close()
### - (692) L1 normalization; expanded training set; dropout (several %-ages); plot convergence
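Every experiment block above repeats the same buff/f.write sequence. A helper along these lines would collapse each block to one call; the helper itself is an assumption, while the CSV layout and column labels are copied from the script:

def write_run(f, label, eval_cost, train_cost, eval_acc, train_acc, test_acc, epochs=100):
    # Mirrors the repeated block: two epoch-indexed sections, then the test accuracy.
    epoch_row = "epoch," + ','.join(str(i) for i in range(epochs)) + '\n'
    f.write(label + '\n')
    f.write(epoch_row)
    f.write("eval_cost," + ','.join(str(v) for v in eval_cost) + '\n')
    f.write("train_cost," + ','.join(str(v) for v in train_cost) + '\n\n')
    f.write(epoch_row)
    f.write("eval_acc," + ','.join(str(v) for v in eval_acc) + '\n')
    f.write("train_acc," + ','.join(str(v) for v in train_acc) + '\n\n')
    f.write("test_acc,{}\n\n".format(test_acc))
    f.flush()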
| 35.829412 | 166 | 0.603924 | 1,699 | 12,182 | 4.244261 | 0.085344 | 0.054916 | 0.091527 | 0.08695 | 0.892248 | 0.868812 | 0.855637 | 0.855637 | 0.806962 | 0.798641 | 0 | 0.029421 | 0.190855 | 12,182 | 339 | 167 | 35.935103 | 0.702141 | 0.833607 | 0 | 0.294118 | 0 | 0 | 0.146567 | 0.015003 | 0.176471 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e5297e53e73b4294c7ccbcefe66f4dbbeb267371 | 116 | py | Python | mp1/assignment1/models/__init__.py | syfrankie/DS498-DL | b92a97156215f25d887435df20b556c45f1dd70e | ["MIT"] | null | null | null | mp1/assignment1/models/__init__.py | syfrankie/DS498-DL | b92a97156215f25d887435df20b556c45f1dd70e | ["MIT"] | null | null | null | mp1/assignment1/models/__init__.py | syfrankie/DS498-DL | b92a97156215f25d887435df20b556c45f1dd70e | ["MIT"] | 1 | 2021-02-23T03:34:07.000Z | 2021-02-23T03:34:07.000Z |
from models.SVM import *
from models.Perceptron import *
from models.Softmax import *
from models.Logistic import *
| 23.2 | 31 | 0.793103 | 16 | 116 | 5.75 | 0.4375 | 0.434783 | 0.521739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 116 | 4 | 32 | 29 | 0.92 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
e549392ee5f3fd69edffe73886a7cd55fa69fec4 | 5,437 | py | Python | tests/test_convert.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | ["MIT"] | 24 | 2021-04-27T07:10:32.000Z | 2022-03-13T04:32:22.000Z | tests/test_convert.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | ["MIT"] | 11 | 2021-04-27T07:49:28.000Z | 2022-02-27T12:46:56.000Z | tests/test_convert.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | ["MIT"] | 5 | 2021-07-30T00:19:29.000Z | 2022-02-01T07:39:50.000Z |
from table2ascii import alignment, table2ascii as t2a
import pytest
def test_header_body_footer():
text = t2a(
header=["#", "G", "H", "R", "S"],
body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
footer=["SUM", "130", "140", "135", "130"],
first_col_heading=True,
)
expected = (
"╔═════╦═══════════════════════╗\n"
"║ # ║ G H R S ║\n"
"╟─────╫───────────────────────╢\n"
"║ 1 ║ 30 40 35 30 ║\n"
"║ 2 ║ 30 40 35 30 ║\n"
"╟─────╫───────────────────────╢\n"
"║ SUM ║ 130 140 135 130 ║\n"
"╚═════╩═══════════════════════╝"
)
assert text == expected
def test_body_footer():
text = t2a(
body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
footer=["SUM", "130", "140", "135", "130"],
first_col_heading=True,
)
expected = (
"╔═════╦═══════════════════════╗\n"
"║ 1 ║ 30 40 35 30 ║\n"
"║ 2 ║ 30 40 35 30 ║\n"
"╟─────╫───────────────────────╢\n"
"║ SUM ║ 130 140 135 130 ║\n"
"╚═════╩═══════════════════════╝"
)
assert text == expected
def test_header_body():
text = t2a(
header=["#", "G", "H", "R", "S"],
body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
first_col_heading=True,
)
expected = (
"╔═══╦═══════════════════╗\n"
"║ # ║ G H R S ║\n"
"╟───╫───────────────────╢\n"
"║ 1 ║ 30 40 35 30 ║\n"
"║ 2 ║ 30 40 35 30 ║\n"
"╚═══╩═══════════════════╝"
)
assert text == expected
def test_header_footer():
text = t2a(
header=["#", "G", "H", "R", "S"],
footer=["SUM", "130", "140", "135", "130"],
first_col_heading=True,
)
expected = (
"╔═════╦═══════════════════════╗\n"
"║ # ║ G H R S ║\n"
"╟─────╫───────────────────────╢\n"
"╟─────╫───────────────────────╢\n"
"║ SUM ║ 130 140 135 130 ║\n"
"╚═════╩═══════════════════════╝"
)
assert text == expected
def test_header():
text = t2a(
header=["#", "G", "H", "R", "S"],
first_col_heading=True,
)
expected = (
"╔═══╦═══════════════╗\n"
"║ # ║ G H R S ║\n"
"╟───╫───────────────╢\n"
"╚═══╩═══════════════╝"
)
assert text == expected
def test_body():
text = t2a(
body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
first_col_heading=True,
)
expected = (
"╔═══╦═══════════════════╗\n"
"║ 1 ║ 30 40 35 30 ║\n"
"║ 2 ║ 30 40 35 30 ║\n"
"╚═══╩═══════════════════╝"
)
assert text == expected
def test_footer():
text = t2a(
footer=["SUM", "130", "140", "135", "130"],
first_col_heading=True,
)
expected = (
"╔═════╦═══════════════════════╗\n"
"╟─────╫───────────────────────╢\n"
"║ SUM ║ 130 140 135 130 ║\n"
"╚═════╩═══════════════════════╝"
)
assert text == expected
def test_header_footer_unequal():
with pytest.raises(ValueError):
t2a(
header=["H", "R", "S"],
footer=["SUM", "130", "140", "135", "130"],
first_col_heading=True,
)
def test_header_body_unequal():
with pytest.raises(ValueError):
t2a(
header=["#", "G", "H", "R", "S"],
body=[
["0", "45", "30", "32", "28"],
["1", "30", "40", "35", "30", "36"],
["2", "30", "40", "35", "30"],
],
first_col_heading=True,
)
def test_footer_body_unequal():
with pytest.raises(ValueError):
t2a(
body=[
["0", "45", "30", "32", "28"],
["1", "30", "40", "35", "30"],
["2", "30", "40", "35", "30"],
],
footer=["SUM", "130", "140", "135", "130", "36"],
first_col_heading=True,
)
def test_empty_header():
text = t2a(
header=[],
body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
first_col_heading=True,
)
expected = (
"╔═══╦═══════════════════╗\n"
"║ 1 ║ 30 40 35 30 ║\n"
"║ 2 ║ 30 40 35 30 ║\n"
"╚═══╩═══════════════════╝"
)
assert text == expected
def test_empty_body():
text = t2a(
header=["#", "G", "H", "R", "S"],
body=[],
first_col_heading=True,
)
expected = (
"╔═══╦═══════════════╗\n"
"║ # ║ G H R S ║\n"
"╟───╫───────────────╢\n"
"╚═══╩═══════════════╝"
)
assert text == expected
def test_numeric_data():
text = t2a(
header=[1, "G", "H", "R", "S"],
body=[[1, 2, 3, 4, 5]],
footer=["A", "B", 1, 2, 3],
column_widths=[4, 5, 5, 4, 5],
first_col_heading=True,
)
expected = (
"╔════╦══════════════════════╗\n"
"║ 1 ║ G H R S ║\n"
"╟────╫──────────────────────╢\n"
"║ 1 ║ 2 3 4 5 ║\n"
"╟────╫──────────────────────╢\n"
"║ A ║ B 1 2 3 ║\n"
"╚════╩══════════════════════╝"
)
assert text == expected
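For reference, the API these tests exercise can be called directly; this usage sketch reuses the exact arguments from test_header_body_footer above:

from table2ascii import table2ascii as t2a

print(t2a(
    header=["#", "G", "H", "R", "S"],
    body=[["1", "30", "40", "35", "30"], ["2", "30", "40", "35", "30"]],
    footer=["SUM", "130", "140", "135", "130"],
    first_col_heading=True,
))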
| 26.915842 | 76 | 0.307523 | 620 | 5,437 | 4.08871 | 0.104839 | 0.03787 | 0.056805 | 0.07574 | 0.876134 | 0.840237 | 0.817751 | 0.751479 | 0.741223 | 0.715976 | 0 | 0.113558 | 0.356998 | 5,437 | 201 | 77 | 27.049751 | 0.349256 | 0 | 0 | 0.689655 | 0 | 0 | 0.336215 | 0.164245 | 0 | 0 | 0 | 0 | 0.057471 | 1 | 0.074713 | false | 0 | 0.011494 | 0 | 0.086207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e5814e08c4bbef9dd023b2c150cd5634dfc5236a | 98 | py | Python | env/lib/python3.6/site-packages/pandas/computation/api.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | ["MIT"] | 4 | 2016-12-06T20:22:28.000Z | 2018-05-04T09:51:45.000Z | env/lib/python3.6/site-packages/pandas/computation/api.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | ["MIT"] | 11 | 2020-06-05T17:24:17.000Z | 2022-03-11T23:15:26.000Z | env/lib/python3.6/site-packages/pandas/computation/api.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | ["MIT"] | 3 | 2019-12-24T18:46:58.000Z | 2021-09-04T11:57:13.000Z |
# flake8: noqa
from pandas.computation.eval import eval
from pandas.computation.expr import Expr
| 19.6 | 40 | 0.816327 | 14 | 98 | 5.714286 | 0.571429 | 0.25 | 0.525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011628 | 0.122449 | 98 | 4 | 41 | 24.5 | 0.918605 | 0.122449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
e5c14a5fbdf8135e3a3cc507ddd016137a8ddbb2 | 22,857 | py | Python | scripts/custom_env_utils.py | mahaitongdae/Feasible-Policy-Optimization | 1206ea6d01af3b14e3c4b1b4bb729d342cb38e92 | ["MIT"] | null | null | null | scripts/custom_env_utils.py | mahaitongdae/Feasible-Policy-Optimization | 1206ea6d01af3b14e3c4b1b4bb729d342cb38e92 | ["MIT"] | null | null | null | scripts/custom_env_utils.py | mahaitongdae/Feasible-Policy-Optimization | 1206ea6d01af3b14e3c4b1b4bb729d342cb38e92 | ["MIT"] | null | null | null |
from gym.envs.registration import register
def register_custom_env():
# finite time convergence test suite
config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
# finite time convergence test suite modification
'robot_placements': None, # Robot placements list (defaults to full extents)
'robot_locations': [[0.0, 0.0]], # Explicitly place robot XY coordinate
'robot_keepout': 0.0, # Needs to be set to match the robot XML used
# Hazardous areas
'hazards_placements': None, # Placements list for hazards (defaults to full extents)
'hazards_locations': [[-0.3, -0.3]], # Fixed locations to override placements
'hazards_keepout': 0.0, # Radius of hazard keepout for placement
'hazards_num': 1,
'hazards_size': 0.5,
'task': 'goal',
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
#observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': False, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': True, # Observe the vector from agent to hazards
'observe_vases': True, # Observe the vector from agent to vases
'observe_pillars': False, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': True, # Constrain robot from being in hazardous areas
        'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': False, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# cost discrete/continuous. As for AdamBA, I guess continuous cost is more suitable.
'constrain_indicator': False, # If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
#lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
#num setting
'vases_num': 0,
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
        'frameskip_binom_n': 10, # Number of draw trials in binomial distribution (max frameskip) # verified: this parameter is equivalent to the one in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomGoal1-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
'task': 'goal',
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
#observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': False, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': True, # Observe the vector from agent to hazards
'observe_vases': True, # Observe the vector from agent to vases
'observe_pillars': False, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': True, # Constrain robot from being in hazardous areas
        'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': False, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# cost discrete/continuous. As for AdamBA, I guess continuous cost is more suitable.
'constrain_indicator': False, # If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
#lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
#num setting
'hazards_num': 8,
'hazards_size': 0.45,
'vases_num': 0,
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
        'frameskip_binom_n': 10, # Number of draw trials in binomial distribution (max frameskip) # verified: this parameter is equivalent to the one in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomGoal2-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
    config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
'task': 'goal',
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
#observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': False, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': False, # Observe the vector from agent to hazards
'observe_vases': False, # Observe the vector from agent to vases
'observe_pillars': True, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': False, # Constrain robot from being in hazardous areas
        'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': True, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# cost discrete/continuous. As for AdamBA, I guess continuous cost is more suitable.
'constrain_indicator': False, # If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
#lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
#num setting
'hazards_num': 0,
'hazards_size': 0.15,
'vases_num': 0,
# Pillars (immovable obstacles we should not touch)
# 'robot_keepout': 0.4,
'pillars_num': 8, # Number of pillars in the world
'pillars_placements': None, # Pillars placements list (defaults to full extents)
# 'pillars_locations': [], # Fixed locations to override placements
'pillars_keepout': 0.4, # Radius for placement of pillars
'pillars_size': 0.30, # Half-size (radius) of pillar objects
'pillars_height': 0.5, # Half-height of pillars geoms
'pillars_cost': 1.0, # Cost (per step) for being in contact with a pillar
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
        'frameskip_binom_n': 10, # Number of draw trials in binomial distribution (max frameskip) # verified: this parameter is equivalent to the one in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomGoalPillar2-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
    config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
'task': 'goal',
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
#observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': False, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': False, # Observe the vector from agent to hazards
'observe_vases': False, # Observe the vector from agent to vases
'observe_pillars': True, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': False, # Constrain robot from being in hazardous areas
        'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': True, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# cost discrete/continuous. As for AdamBA, I guess continuous cost is more suitable.
'constrain_indicator': False, # If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
#lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
#num setting
'hazards_num': 0,
'hazards_size': 0.15,
'vases_num': 0,
# Pillars (immovable obstacles we should not touch)
# 'robot_keepout': 0.4,
'pillars_num': 8, # Number of pillars in the world
'pillars_placements': None, # Pillars placements list (defaults to full extents)
# 'pillars_locations': [], # Fixed locations to override placements
'pillars_keepout': 0.4, # Radius for placement of pillars
'pillars_size': 0.45, # Half-size (radius) of pillar objects
'pillars_height': 0.5, # Half-height of pillars geoms
'pillars_cost': 1.0, # Cost (per step) for being in contact with a pillar
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
        'frameskip_binom_n': 10, # Number of draw trials in binomial distribution (max frameskip) # verified: this parameter is equivalent to the one in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomGoalPillar3-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
'task': 'push',
'box_size': 0.2,
'box_null_dist': 0,
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
# observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': True, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': True, # Observe the vector from agent to hazards
'observe_vases': True, # Observe the vector from agent to vases
'observe_pillars': False, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': True, # Constrain robot from being in hazardous areas
        'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': False, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# cost discrete/continuous. As for AdamBA, I guess continuous cost is more suitable.
'constrain_indicator': False,
# If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
# lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
# num setting
'hazards_num': 1,
'hazards_size': 0.15,
'vases_num': 0,
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
'frameskip_binom_n': 10,
        # Number of draw trials in binomial distribution (max frameskip) # verified: this parameter is equivalent to the one in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomPush1-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
config = {
'robot_base': 'xmls/point.xml', # dt in xml, default 0.002s for point
'task': 'push',
'box_size': 0.2,
'box_null_dist': 0,
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
# observe goal/box/...
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': True, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': True, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': True, # Observe the vector from agent to hazards
'observe_vases': True, # Observe the vector from agent to vases
'observe_pillars': False, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': True, # Constrain robot from being in hazardous areas
'constrain_vases': False, # Constrain robot from touching objects
'constrain_pillars': False, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
# Cost can be discrete (indicator) or continuous; for AdamBA, a continuous cost seems more suitable.
'constrain_indicator': False,
# If true, all costs are either 1 or 0 for a given step. If false, then we get dense cost.
# lidar setting
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_num_bins': 16,
# num setting
'hazards_num': 8,
'hazards_size': 0.30,
'vases_num': 0,
# dt perhaps?
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
'frameskip_binom_n': 10,
# Number of draw trials in the binomial distribution (max frameskip) # verified: this parameter is equivalent to the corresponding parameter in the xml
'frameskip_binom_p': 1.0 # Probability of trial return (controls distribution)
}
env_id = 'Safexp-CustomPush2-v0'
register(id=env_id,
entry_point='safety_gym.envs.mujoco:Engine',
kwargs={'config': config})
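# A minimal usage sketch (illustration only, not part of the original file): once
# the registrations above have run in-process, the custom tasks can be created
# through gym's registry. Safety Gym reports the constraint cost in the step
# info dict; with 'constrain_indicator': False it is dense rather than 0/1.
import gym  # assumes gym and safety_gym are installed
env = gym.make('Safexp-CustomPush1-v0')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
print('cost this step:', info.get('cost'))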
# ---- qa327_test/test_R5.py | HenryTsui1/CISC327 | MIT | Python ----
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327.models import db, User, Ticket
from werkzeug.security import generate_password_hash, check_password_hash
# Mock a sample user
test_user = User(
email='test@test.com',
name='test_user',
password=generate_password_hash('Test!!'),
balance = 5000
)
# Mock a sample ticket
test_tickets = Ticket(
title='TestTest',
quantity=50,
price=50,
expDate=20201212
)
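# Note: these objects are what the patched qa327.backend.get_user / get_ticket
# calls below return, so the tests exercise the frontend against in-memory
# mocks rather than a real database.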
class R5Test(BaseCase):
# The name of the ticket has to be alphanumeric-only, and spaces are allowed only if not the first or the last character. (Positive)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_1_1(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#message")
self.assert_text("Updated", "#message")
# The name of the ticket has to be alphanumeric-only, and spaces are allowed only if not the first or the last character. (Negative - non-alphanumeric)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_1_2(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "@!@#$%^&")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Name Format Error", "#uMessage")
# The name of the ticket has to be alphanumeric-only, and spaces are allowed only if not the first or the last character. (Negative - leading space and trailing space)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_1_3(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "frontSpace ")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Name Format Error", "#uMessage")
self.type("#upd-name", " backSpace")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Name Format Error", "#uMessage")
# The name of the ticket is no longer than 60 characters. (Positive)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_1_4(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#message")
self.assert_text("Updated", "#message")
# The name of the ticket is no longer than 60 characters. (Negative)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_1_5(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTestaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Name Format Error", "#uMessage")
# The quantity of the tickets has to be more than 0 and less than or equal to 100. (Positive)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_2_1(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#message")
self.assert_text("Updated", "#message")
# The quantity of the tickets has to be more than 0 and less than or equal to 100. (Negative - below and above range)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_2_2(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "-2")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Quantity", "#uMessage")
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "101")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Quantity", "#uMessage")
# Price has to be in the range [10, 100]. (Positive)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_3_1(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#message")
self.assert_text("Updated", "#message")
# Price has to be in the range [10, 100]. (Negative)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_3_2(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "5")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Price", "#uMessage")
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "101")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Price", "#uMessage")
# Date must be given in the format YYYYMMDD (e.g. 20200901). (Positive; only checks that it is an int of length 8)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_4_1(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122020")
self.click('input[id="upd-submit"]')
self.assert_element("#message")
self.assert_text("Updated", "#message")
# Date must be given in the format YYYYMMDD (e.g. 20200901). (Negative; only checks that it is an int of length 8)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_4_2(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122022020200")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Date Format (YYYYMMDD)", "#uMessage")
# For any errors, redirect back to / and show an error message
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_5(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest@!@!@!@")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12121212")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Name Format Error", "#uMessage")
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "1000000")
self.type("#upd-price", "10")
self.type("#upd-exp", "12345678")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Quantity", "#uMessage")
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "15")
self.type("#upd-price", "-2")
self.type("#upd-exp", "12345678")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Price", "#uMessage")
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "10")
self.type("#upd-price", "10")
self.type("#upd-exp", "12122022020200")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Invalid Date Format (YYYYMMDD)", "#uMessage")
# The ticket of the given name must exist (positive)
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_tickets)
def test_R5_6(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "TestTest")
self.type("#upd-quantity", "20")
self.type("#upd-price", "10")
self.type("#upd-exp", "12345678")
self.click('input[id="upd-submit"]')
# The ticket of the given name must exist (negative)
@patch('qa327.backend.get_user', return_value=test_user)
def test_R5_7(self, *_):
self.open(base_url + '/logout')
self.open(base_url + '/login')
self.type("#email", "test@test.com")
self.type("#password", "Test!!")
self.click('input[type="submit"]')
self.type("#upd-name", "randomRandomRandom")
self.type("#upd-quantity", "20")
self.type("#upd-price", "10")
self.type("#upd-exp", "12345678")
self.click('input[id="upd-submit"]')
self.assert_element("#uMessage")
self.assert_text("Ticket Does Not Exist", "#uMessage")
# ---- CodeEntropy/FunctionCollection/EntropyFunctions.py | DonaldChung-HK/CodeEntropy | MIT | Python ----
from ast import arg
import sys, os
import numpy as nmp
from CodeEntropy.ClassCollection import BeadClasses as BC
from CodeEntropy.ClassCollection import ConformationEntity as CONF
from CodeEntropy.ClassCollection import ModeClasses
from CodeEntropy.ClassCollection import CustomDataTypes
from CodeEntropy.FunctionCollection import CustomFunctions as CF
from CodeEntropy.FunctionCollection import GeometricFunctions as GF
from CodeEntropy.FunctionCollection import UnitsAndConversions as UAC
from CodeEntropy.FunctionCollection import Utils
from CodeEntropy.IO import Writer
from CodeEntropy.FunctionCollection import UnitsAndConversions as CONST
import multiprocessing as mp
from functools import partial
import pandas as pd
def calculate_entropy_per_dof(arg_frequencies, arg_temper):
"""
For each frequency that corresponds to a given DOF,
compute the entropy using eqn (4) in Higham et al. 2018
and return it.
"""
exponent = CONST.PLANCK_CONST*arg_frequencies/UAC.get_KT2J(arg_temper)
expTermPositive = nmp.power(nmp.e, exponent)
expTermNegative = nmp.power(nmp.e, -exponent)
DOFEntropy = exponent/(expTermPositive - 1)
DOFEntropy -= nmp.log(1 - expTermNegative)
DOFEntropy *= CONST.GAS_CONST
return DOFEntropy
# END
def compute_frequency_from_lambda(arg_lambdas, arg_temper):
"""
For each lambda, compute the frequency.
F = sqrt(λ/kT)/2π
"""
return nmp.sqrt((arg_lambdas)/UAC.get_KT2J(arg_temper))/(2*nmp.pi)
#END
def compute_ampfac_from_lambda(arg_lambdas, arg_temper):
"""
For each mode (lambda), the amplitude factor is computed.
Amplitude
A_i = kT/sqrt(L_i) for all 'i' in 1:num. modes
Dim of A_i: sqrt([M]).L
Units of A_i: sqrt(amu).Ang
Ref: Macromolecule entropy from force, R. Henchman JCTC 2014
"""
afac = UAC.M2ANG * UAC.sqrtKG2AMU * nmp.divide(UAC.get_KT2J(arg_temper), nmp.sqrt(arg_lambdas))
# print("Ampl factor: ", afac)
return afac
#END
def get_avg_hpos(arg_atom, arg_frame, arg_selector, arg_hostDataContainer):
"""
Compute the average of the coordinates of the hydrogen
atoms covalently bonded to the atom with index `arg_atom` in a
given point in time `arg_frame` and return the value.
If no hydrogen is bonded to it, return a
random value for 3D cartesian coordinates.
"""
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
avgHPos = nmp.zeros((3))
#original argument SEL.Atomselection(arg_baseMolecule, f"BONDed {arg_atom}") & SEL.Atomselection(arg_baseMolecule, "hydrogen")
selH = allSel.select_atoms(f"name H* and bonded index {arg_atom}")
if selH.n_atoms != 0:
for iH in selH.indices:
iHPosition = arg_hostDataContainer._labCoords[arg_frame, iH]
avgHPos = nmp.add(avgHPos, iHPosition)
avgHPos /= selH.n_atoms
else:
# assign a random position because
# eventually the only atom using this
# basis will be the heavy atom, which
# simply lies at the origin
avgHPos = nmp.random.random(3)
# transform the average H position to a
# coordinate system whose origin is the position of
# the heavy atom.
avgHPos = avgHPos - arg_hostDataContainer._labCoords[arg_frame, arg_atom]
return avgHPos
#END
def get_avg_apos(arg_atom, arg_frame, arg_selector, arg_hostDataContainer):
"""
Compute the average of the coordinates of the heavy
atoms covalently bonded to the atom with index `arg_atom` in a
given point in time `arg_frame` and return the value.
If no heavy atom is bonded to it, return a
random value for 3D cartesian coordinates.
"""
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
avgPos = nmp.zeros((3))
selHeavy = allSel.select_atoms(f"not name H* and bonded index {arg_atom}")
if selHeavy.n_atoms != 0:
for iA in selHeavy.indices:
iPosition = arg_hostDataContainer._labCoords[arg_frame, iA]
avgPos = nmp.add(avgPos, iPosition)
avgPos /= selHeavy.n_atoms
else:
# assign a random position because
# eventually the only atom using this
# basis will be the heavy atom, which
# simply lies at the origin
avgPos = nmp.random.random(3)
# transform the average heavy-atom position to a
# coordinate system whose origin is the position of
# the heavy atom.
avgPos = avgPos - arg_hostDataContainer._labCoords[arg_frame, arg_atom]
return avgPos
#END
def compute_entropy_whole_molecule_level(arg_hostDataContainer,
arg_outFile = None,
arg_selector = "all",
arg_moutFile = None,
arg_nmdFile = None,
arg_fScale = 1.0,
arg_tScale = 1.0,
arg_temper = 300.0,
arg_verbose = 3):
"""Conpute the entropy at the whole molecule level.
Determining translation and rotation axes is part of the function.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_outFile (str, optional): path to an output file; output is written in append mode. Defaults to None.
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_moutFile (str, optional): print matrices if path to a matrices out file is not None. Defaults to None.
arg_nmdFile (str, optional): print modespectra if path to a spectra out file is not None. Defaults to None.
arg_fScale (float, optional): Force scale. Defaults to 1.0.
arg_tScale (float, optional): Torque scale. Defaults to 1.0.
arg_temper (float, optional): temperature in K. Defaults to 300.0.
arg_verbose (int, optional): verbose level from 1-5. Defaults to 3.
Returns:
tuple of floats:
entropyFF (float): Whole molecule level FF Entropy in J/mol/K
entropyTT (float): Whole molecule level TT Entropy in J/mol/K
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Hierarchy level. --> Whole molecule <--"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Hierarchy level. --> Whole molecule <--"))
Utils.printOut(arg_outFile,'-'*60)
# Define a bead collection at this level
wholeMoleculeSystem = BC.BeadCollection("whole_mol_bead", arg_hostDataContainer)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# define a bead representing the whole molecule
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
allAtomList = allSel.indices
wholeProteinBead = BC.Bead(arg_atomList= allAtomList, \
arg_numFrames=numFrames, \
arg_hostDataContainer = arg_hostDataContainer,\
arg_beadName = "WMOL",
arg_beadResi = 0,
arg_beadResn = "WMOL",
arg_beadChid = "X")
# add the bead to the bead collection
wholeMoleculeSystem.listOfBeads = [wholeProteinBead]
Utils.printflush(f"Total number of beads at the whole molecule level = {len(wholeMoleculeSystem.listOfBeads)}")
if arg_outFile != None:
Utils.printOut(arg_outFile,f"Total number of beads at the whole molecule level = {len(wholeMoleculeSystem.listOfBeads)}")
# reset weighted vectors for each bead and
for iBead in wholeMoleculeSystem.listOfBeads:
iBead.reset_totalWeightedVectors( (numFrames, 3) )
iBead.position = iBead.get_center_of_mass_lab(arg_frame = 0)
# resetting all the F-T combo matrices to zero
wholeMoleculeSystem.reinitialize_matrices()
# # and assign a representative position
# for iBead in wholeMoleculeSystem.listOfBeads:
# iBead.position = iBead.get_center_of_mass_lab(arg_frame = 0)
# setup translational and rotational axes
Utils.printflush("Assigning Translation and Rotation Axes @ whole molecule level->", end = ' ' )
# Use Princ. Axes COOR SYS.
# USE whole molecule principal axes COOR SYS for each atom
for iFrame in range(numFrames):
selMOI, selAxes = arg_hostDataContainer\
.get_principal_axes(arg_atomList = allAtomList,\
arg_frame = iFrame, arg_sorted=False)
selCOM = arg_hostDataContainer\
.get_center_of_mass(arg_atomList = allAtomList, \
arg_frame = iFrame)
arg_hostDataContainer.update_translationAxesArray_at(arg_frame = iFrame, arg_atomList = allAtomList, arg_pAxes = selAxes, arg_orig = selCOM)
arg_hostDataContainer.update_rotationAxesArray_at(arg_frame = iFrame, arg_atomList = allAtomList, arg_pAxes = selAxes, arg_orig = selCOM)
Utils.printflush("Done")
# update local coordinates
Utils.printflush("Updating Local coordinates->",end = ' ')
arg_hostDataContainer.update_localCoords_of_all_atoms(arg_type="R")
Utils.printflush('Done')
# update local forces
Utils.printflush("Updating Local forces->", end = ' ' )
arg_hostDataContainer.update_localForces_of_all_atoms(arg_type="T")
Utils.printflush('Done')
#update torques in the arg_hostDataContainer
Utils.printflush("Updating Local torques->", end = ' ')
for iFrame in range(numFrames):
for iAtom in allSel.indices:
coords_i = arg_hostDataContainer.localCoords[iFrame, iAtom]
forces_i = arg_hostDataContainer.localForces[iFrame, iAtom]
# arg_hostDataContainer.localTorques[iFrame,iAtom,:] = nmp.cross(coords_i,forces_i)
arg_hostDataContainer.localTorques[iFrame,iAtom,:] = CF.cross_product(coords_i,forces_i)
Utils.printflush('Done')
Utils.printflush("Weighting forces and torques->", end = ' ')
# mass weighting the forces and torques
for iBead in wholeMoleculeSystem.listOfBeads:
# mass weighting the forces for each bead (iBead) in each direction (j)
# inertia weighting the torques for each bead (iBead) in each direction (j)
for iFrame in range(numFrames):
# define local basis as the rotationalAxes of the first atom in the atomList of iBead
# doesn't matter because they all have the same R and T axes
iLocalBasis = arg_hostDataContainer.rotationAxesArray[iFrame][iBead.atomList[0]]
# get the moment of inertia tensor for iBead in this local basis
beadMOITensor = iBead.get_moment_of_inertia_tensor_local(arg_localBasis = iLocalBasis, arg_frame = iFrame)
# get total force and torque in each direction and weight them
for iAtom in iBead.atomList:
iBead.totalWeightedForces[iFrame] += arg_hostDataContainer.localForces[iFrame,iAtom]
iBead.totalWeightedTorques[iFrame] += arg_hostDataContainer.localTorques[iFrame,iAtom]
iBead.totalWeightedForces[iFrame] /= nmp.sqrt(iBead.get_total_mass())
# weight total torque in each direction by √beadMOITensor[jj]
for j in range(3):
if nmp.isclose(iBead.totalWeightedTorques[iFrame,j], 0.0):
# then the beadMOITensor[j,j] must be close to 0 as well (machine precision wise)
# ensure that
assert(nmp.isclose(beadMOITensor[j,j] , 0.0))
else:
iBead.totalWeightedTorques[iFrame,j] /= nmp.sqrt(beadMOITensor[j,j])
Utils.printflush('Done')
# now fill in the matrices
Utils.printflush("Updating the submatrices ... ")
wholeMoleculeSystem.update_subMatrix(arg_pairString="FF",arg_verbose=arg_verbose)
wholeMoleculeSystem.update_subMatrix(arg_pairString="TT",arg_verbose=arg_verbose)
Utils.printflush('Done')
#make quadrant from subMatrices
# FF and TT quadrants must be symmetric
Utils.printflush("Generating Quadrants->",end = ' ')
ffQuadrant = wholeMoleculeSystem.generate_quadrant(arg_pairString="FF",arg_filterZeros=1)
ttQuadrant = wholeMoleculeSystem.generate_quadrant(arg_pairString="TT",arg_filterZeros=1)
# scale forces/torques of these quadrants
ffQuadrant = nmp.multiply(arg_fScale**2, ffQuadrant)
ttQuadrant = nmp.multiply(arg_tScale**2, ttQuadrant)
Utils.printflush("Done")
# print matrices if asked
if arg_moutFile:
Writer.write_a_matrix(arg_matrix = ffQuadrant, arg_descriptor = "FF COV AT WHOLE MOLECULE LEVEL", arg_outFile = arg_moutFile)
Writer.write_a_matrix(arg_matrix = ttQuadrant, arg_descriptor = "TT COV AT WHOLE MOLECULE LEVEL", arg_outFile = arg_moutFile)
# remove any row or column with zero axis
# this could have been done while generating quadrants. Can be merged if wished for
# ffQuadrant = wholeMoleculeSystem.filter_zero_rows_columns(ffQuadrant)
# ttQuadrant = wholeMoleculeSystem.filter_zero_rows_columns(ttQuadrant)
# diagonalize
Utils.printflush("Diagonalizing->", end = ' ' )
lambdasFF, eigVectorsFF = Utils.diagonalize(ffQuadrant)
lambdasTT, eigVectorsTT = Utils.diagonalize(ttQuadrant)
Utils.printflush('Done')
# since eigen values can be complex numbers but with imag parts very close to zero
# use numpy's real_if_close with some tolerance to mask the imag parts
# Utils.printflush('Checking the nature of eigen values and conditioning them ...', end = ' ')
# tol = 1e+5
# lambdasFF = nmp.real_if_close(lambdasFF/1e+5, tol= tol)
# lambdasTT = nmp.real_if_close(lambdasTT/1e+5, tol= tol)
# Utils.printflush('Done')
# change to SI units
Utils.printflush('Changing the units of eigen values to SI units->', end = ' ')
lambdasFF = UAC.change_lambda_units(lambdasFF)
lambdasTT = UAC.change_lambda_units(lambdasTT)
Utils.printflush('Done')
# Create a spectrum to store these modes for
# proper output and analyses.
modeSpectraFF = []
modeSpectraTT = []
for midx, mcombo in enumerate(zip(lambdasFF, eigVectorsFF)):
fflmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(fflmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = fflmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(fflmb, arg_temper)
modeSpectraFF.append(newMode)
for midx, mcombo in enumerate(zip(lambdasTT, eigVectorsTT)):
ttlmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(ttlmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = ttlmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(ttlmb, arg_temper)
modeSpectraTT.append(newMode)
# assign spectra to the bead collection
wholeMoleculeSystem.assign_attribute("modeSpectraFF", modeSpectraFF)
wholeMoleculeSystem.assign_attribute("modeSpectraTT", modeSpectraTT)
# sorting the spectrum
Utils.printflush('Sorting spectrum in ascending order of frequencies->', end = ' ')
wholeMoleculeSystem.modeSpectraFF = ModeClasses.sort_modes(wholeMoleculeSystem.modeSpectraFF)
wholeMoleculeSystem.modeSpectraTT = ModeClasses.sort_modes(wholeMoleculeSystem.modeSpectraTT)
Utils.printflush('Done')
# Print modes if asked
if arg_nmdFile:
Writer.append_file(arg_nmdFile)
wholeMoleculeSystem.write_nmd_file(arg_nmdfile = arg_nmdFile, \
arg_spectrum = wholeMoleculeSystem.modeSpectraFF,
arg_wfac = [iBead.get_total_mass() for iBead in wholeMoleculeSystem.listOfBeads])
# compute entropy
entropyFF = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in wholeMoleculeSystem.modeSpectraFF]
entropyTT = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in wholeMoleculeSystem.modeSpectraTT]
# print final outputs
Utils.printflush("Entropy values:")
Utils.printflush(f"{'FF Entropy (Whole mol level)':<40s} : {nmp.sum(entropyFF):.4f} J/mol/K")
if arg_outFile != None:
Utils.printOut(arg_outFile, f"{'FF Entropy (Whole mol level)':<40s} : {nmp.sum(entropyFF):.4f} J/mol/K")
Utils.printflush(f"{'TT Entropy (Whole mol level)':<40s} : {nmp.sum(entropyTT):.4f} J/mol/K")
if arg_outFile != None:
Utils.printOut(arg_outFile, f"{'TT Entropy (Whole mol level)':<40s} : {nmp.sum(entropyTT):.4f} J/mol/K")
return (nmp.sum(entropyFF), nmp.sum(entropyTT))
#END
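# A minimal call sketch (hypothetical variable names; assumes a
# CodeEntropy.ClassCollection.DataContainer instance has already been built
# from a force-carrying trajectory):
#   sFF, sTT = compute_entropy_whole_molecule_level(
#       arg_hostDataContainer=dataContainer,
#       arg_outFile="molecule_entropy.out",
#       arg_temper=300.0)
#   # sFF, sTT are the whole-molecule FF and TT entropies in J/mol/K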
def compute_entropy_residue_level(arg_hostDataContainer,
arg_outFile = None,
arg_selector = "all",
arg_moutFile = None,
arg_nmdFile = None,
arg_fScale = 1.0,
arg_tScale = 1.0,
arg_temper = 300.0,
arg_axis_list = ['C', 'CA', 'N'],
arg_verbose = 3):
"""Computes the entropy calculations at the residue level
where each residue is treated as a separate bead.
Determining translation and rotation axes is part of the
function. A common set of translation axes, the principal axes of the
whole molecule, is used for all residues. The rotation axes are
specific to each residue and can be specified.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_outFile (str, optional): path to an output file; output is written in append mode. Defaults to None.
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_moutFile (str, optional): print matrices if path to a matrices out file is not None. Defaults to None.
arg_nmdFile (str, optional): print modespectra if path to a spectra out file is not None. Defaults to None.
arg_fScale (float, optional): Force scale. Defaults to 1.0.
arg_tScale (float, optional): Torque scale. Defaults to 1.0.
arg_temper (float, optional): temperature in K. Defaults to 300.0.
arg_axis_list (list, optional): the atom names defining the rotational axes of each residue. Defaults to ['C', 'CA', 'N'].
arg_verbose (int, optional): verbose level from 1-5. Defaults to 3.
Returns:
tuple of floats:
entropyFF (float): Residue level FF Entropy in J/mol/K
entropyTT (float): Residue level TT Entropy in J/mol/K
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Hierarchy level. --> Residues <--"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Hierarchy level. --> Residues <--"))
Utils.printOut(arg_outFile,'-'*60)
# define a bead collection at this level
residueSystem = BC.BeadCollection("res_bead",arg_hostDataContainer)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# define the residue beads and add
residueSystem.listOfBeads= []
# all atom selection
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
allAtoms = allSel.indices
for resindices in allSel.residues.resindices:
iResname = arg_hostDataContainer.universe.residues.resnames[resindices]
iResid = arg_hostDataContainer.universe.residues.resids[resindices]
resLabel = "{}{}".format(iResname, iResid)
Utils.printflush(resLabel)
resSel = allSel.select_atoms(f"resid {iResid}")
# caSel = resSel.select_atoms(f"name CA")
# caIdx = caSel.indices[0]
newBead = BC.Bead(arg_atomList = resSel.indices, \
arg_numFrames = numFrames, \
arg_hostDataContainer = arg_hostDataContainer,\
arg_beadName = resLabel,\
arg_beadResi = iResid,\
arg_beadResn = iResname,\
arg_beadChid = "X" )
# newBead.position = arg_hostDataContainer._labCoords[0,caIdx]
residueSystem.listOfBeads.append(newBead)
Utils.printflush(f"Total number of beads at the residue level = {len(residueSystem.listOfBeads)}")
# reset weighted vectors for each bead
for iBead in residueSystem.listOfBeads:
iBead.reset_totalWeightedVectors( (numFrames,3) )
iBead.position = iBead.get_center_of_mass_lab(arg_frame = 0)
# resetting all the F-T combo matrices to zero
residueSystem.reinitialize_matrices()
# setup translation axes
Utils.printflush("Assigning Translation Axes @ residue level->", end = ' ')
arg_hostDataContainer.reset_translationAxesArray()
# Use Princ. Axes COOR SYS.
for iFrame in range(numFrames):
selMOI, selAxes = arg_hostDataContainer\
.get_principal_axes(arg_atomList = allAtoms, \
arg_frame = iFrame, \
arg_sorted=False)
selCOM = arg_hostDataContainer\
.get_center_of_mass(arg_atomList = allAtoms, \
arg_frame = iFrame)
arg_hostDataContainer.update_translationAxesArray_at(arg_frame = iFrame, \
arg_atomList = allAtoms, \
arg_pAxes = selAxes, \
arg_orig = selCOM)
Utils.printflush("Done")
# setup rotational axes
Utils.printflush("Assigning Rotational Axes @ residue level->")
arg_hostDataContainer.reset_rotationAxesArray()
# for each residue, set the rotational axes to the c-ca-N axes
for resindices in allSel.residues.resindices:
iResid = arg_hostDataContainer.universe.residues.resids[resindices]
iResSel = allSel.select_atoms(f"resid {iResid}")
# Here a single atom is selected, so slicing an array would give a shape mismatch
a1Idx = iResSel.select_atoms(f"name {arg_axis_list[0]}").indices[0]
a2Idx = iResSel.select_atoms(f"name {arg_axis_list[1]}").indices[0]
a3Idx = iResSel.select_atoms(f"name {arg_axis_list[2]}").indices[0]
atoms_in_rid = iResSel.indices
for iFrame in range(numFrames):
a1Position = arg_hostDataContainer._labCoords[iFrame,a1Idx]
a2Position = arg_hostDataContainer._labCoords[iFrame,a2Idx]
a3Position = arg_hostDataContainer._labCoords[iFrame,a3Idx]
ridAxes, ridOrigin = GF.generate_orthonormal_axes_system(arg_coord1 = a1Position, \
arg_coord2 = a2Position, \
arg_coord3 = a3Position)
arg_hostDataContainer.update_rotationAxesArray_at(arg_frame = iFrame, \
arg_atomList = atoms_in_rid, \
arg_pAxes = ridAxes, \
arg_orig = ridOrigin)
if arg_verbose >= 3:
Utils.printflush('{:>5d}'.format(iResid), end = ' ')
if (iResid) % 5 == 0:
Utils.printflush('')
Utils.printflush("")
Utils.printflush("Done")
# update local forces
Utils.printflush("Updating Local forces->", end = ' ')
arg_hostDataContainer.update_localForces_of_all_atoms(arg_type = "T")
Utils.printflush('Done')
# update local coordinates
Utils.printflush("Updating Local coordinates->", end= ' ')
arg_hostDataContainer.update_localCoords_of_all_atoms(arg_type="R")
Utils.printflush('Done')
#update torques in the arg_hostDataContainer if asked for (arg_tScale != 0)
Utils.printflush("Updating Local torques->", end = ' ')
for iFrame in range(numFrames):
for iAtom in allSel.indices:
coords_i = arg_hostDataContainer.localCoords[iFrame, iAtom]
forces_i = arg_hostDataContainer.rotationAxesArray[iFrame, iAtom][0:3,]@arg_hostDataContainer._labForces[iFrame,iAtom]
arg_hostDataContainer.localTorques[iFrame,iAtom,:] = CF.cross_product(coords_i,forces_i)
Utils.printflush('Done')
# mass weighting the forces and torques
Utils.printflush('Weighting forces and torques->', end = ' ')
for iBead in residueSystem.listOfBeads:
# mass weighting the forces for each bead (iBead) in each direction (j)
#inertia weighting the torques for each bead (iBead) in each direction (j)
for iFrame in range(numFrames):
# get total torque and force and weigh them
for iAtom in iBead.atomList:
iBead.totalWeightedForces[iFrame] += arg_hostDataContainer.localForces[iFrame,iAtom]
iBead.totalWeightedTorques[iFrame] += arg_hostDataContainer.localTorques[iFrame,iAtom]
iBead.totalWeightedForces[iFrame] /= nmp.sqrt(iBead.get_total_mass())
# define local basis as the rotationalAxes of the first atom in the atomList of iBead
iLocalBasis = arg_hostDataContainer.rotationAxesArray[iFrame][iBead.atomList[0]]
beadMOITensor = iBead.get_moment_of_inertia_tensor_local(arg_localBasis = iLocalBasis, arg_frame = iFrame)
for j in range(3):
if nmp.isclose(iBead.totalWeightedTorques[iFrame,j] , 0.0):
# then the beadMOITensor[j,j] must be close to 0 as well (machine precision wise)
# ensure that
assert(nmp.isclose(beadMOITensor[j,j] , 0.0))
else:
iBead.totalWeightedTorques[iFrame,j] /= nmp.sqrt(beadMOITensor[j,j])
Utils.printflush('Done')
# now fill in the matrices
Utils.printflush("Updating the submatrices ... ")
residueSystem.update_subMatrix(arg_pairString="FF",arg_verbose=arg_verbose)
residueSystem.update_subMatrix(arg_pairString="TT",arg_verbose=arg_verbose)
Utils.printflush('Done')
#make quadrant from subMatrices
Utils.printflush("Generating Quadrants->",end = ' ')
ffQuadrant = residueSystem.generate_quadrant(arg_pairString="FF",arg_filterZeros=0)
ttQuadrant = residueSystem.generate_quadrant(arg_pairString="TT",arg_filterZeros=0)
Utils.printflush("Done")
# scale forces/torques of these quadrants
ffQuadrant = nmp.multiply(arg_fScale**2, ffQuadrant)
ttQuadrant = nmp.multiply(arg_tScale**2, ttQuadrant)
# print matrices if asked
if arg_moutFile:
Writer.write_a_matrix(arg_matrix = ffQuadrant, arg_descriptor = "FF COV AT RESIDUE LEVEL", arg_outFile = arg_moutFile)
Writer.write_a_matrix(arg_matrix = ttQuadrant, arg_descriptor = "TT COV AT RESIDUE LEVEL", arg_outFile = arg_moutFile)
# remove any row or column with zero axis
# this could have been done while generating quadrants. Can be merged if wished for
# ffQuadrant = residueSystem.filter_zero_rows_columns(ffQuadrant)
# ttQuadrant = residueSystem.filter_zero_rows_columns(ttQuadrant)
# diagonalize
Utils.printflush("Diagonalizing->", end = ' ')
lambdasFF, eigVectorsFF = Utils.diagonalize(ffQuadrant)
#Fix here
# lambdasFF[lambdasFF < 1e-14] = 1e-17
lambdasTT, eigVectorsTT = Utils.diagonalize(ttQuadrant)
# lambdasTT[lambdasTT < 1e-14] = 1e-17
Utils.printflush('Done')
# since eigen values can be complex numbers but with imag parts very close to zero
# use numpy's real_if_close with some tolerance to mask the imag parts
# Utils.printflush('Checking the nature of eigen values and conditioning them ...', end = ' ')
# tol = 1e+5
# lambdasFF = nmp.real_if_close(lambdasFF/1e+5, tol= tol)
# lambdasTT = nmp.real_if_close(lambdasTT/1e+5, tol= tol)
# Utils.printflush('Done')
# change to SI units
Utils.printflush('Changing the units of eigen values to SI units->', end = ' ')
lambdasFF = UAC.change_lambda_units(lambdasFF)
lambdasTT = UAC.change_lambda_units(lambdasTT)
Utils.printflush('Done')
# Create a spectrum to store these modes for
# proper output and analyses.
modeSpectraFF = []
for midx, mcombo in enumerate(zip(lambdasFF, eigVectorsFF)):
fflmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(fflmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = fflmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(fflmb, arg_temper)
modeSpectraFF.append(newMode)
residueSystem.assign_attribute("modeSpectraFF", modeSpectraFF)
modeSpectraTT = []
for midx, mcombo in enumerate(zip(lambdasTT, eigVectorsTT)):
ttlmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(ttlmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = ttlmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(ttlmb, arg_temper)
modeSpectraTT.append(newMode)
residueSystem.assign_attribute("modeSpectraTT", modeSpectraTT)
# sorting the spectrum
Utils.printflush('Sorting spectrum in ascending order of frequencies->', end = ' ')
residueSystem.modeSpectraFF = ModeClasses.sort_modes(residueSystem.modeSpectraFF)
residueSystem.modeSpectraTT = ModeClasses.sort_modes(residueSystem.modeSpectraTT)
Utils.printflush('Done')
# Print modes if asked
if arg_nmdFile:
Writer.append_file(arg_nmdFile)
residueSystem.write_nmd_file(arg_nmdfile = arg_nmdFile, \
arg_spectrum = residueSystem.modeSpectraFF,\
arg_wfac = [iBead.get_total_mass() for iBead in residueSystem.listOfBeads])
# compute entropy
# 1. remove the smallest 6 freqs from the FF spectrum
# because they may be overlapping with whole molecule
# These 6 low frequency modes capture the translation and rotation at
# whole molecule level
# 2. DO NOT remove any freq from TT spectrum because
# they are uncoupled to any TT freq in any other hierarchy
entropyFF = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in residueSystem.modeSpectraFF[6:]]
entropyTT = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in residueSystem.modeSpectraTT[0:]]
#sum the total
totEntropyFF = nmp.sum(entropyFF)
totEntropyTT = nmp.sum(entropyTT)
# print final outputs
Utils.printflush("Entropy values:")
Utils.printflush(f"{'FF Entropy (Residue level)':<40s} : {totEntropyFF:.4f} J/mol/K")
Utils.printflush(f"{'TT Entropy (Residue level)':<40s} : {totEntropyTT:.4f} J/mol/K")
if arg_outFile != None:
Utils.printOut(arg_outFile,f"{'FF Entropy (Residue level)':<40s} : {totEntropyFF:.4f} J/mol/K")
Utils.printOut(arg_outFile,f"{'TT Entropy (Residue level)':<40s} : {totEntropyTT:.4f} J/mol/K")
return (totEntropyFF, totEntropyTT)
#END
def UA_residue_protein(allSel,
arg_hostDataContainer,
numFrames,
heavyAtomArray,
arg_fScale,
arg_tScale,
arg_temper,
arg_outFile,
arg_selector,
arg_verbose,
arg_moutFile,
arg_nmdFile,
arg_axis_list,
resindices):
"""
Support function for calculating UA-level entropy for each residue; the work is broken into per-residue units so it can be distributed for parallel processing.
Args:
Args correspond to variables in CodeEntropy.FunctionCollection.EntropyFunctions.compute_entropy_UA_level_multiprocess
Returns:
Tuple of floats:
entropyFF (float): UA level FF Entropy for current residue of resindices
entropyTT (float): UA level TT Entropy for current residue of resindices
"""
iResname = arg_hostDataContainer.universe.residues.resnames[resindices]
iResid = arg_hostDataContainer.universe.residues.resids[resindices]
resLabel = "{}{}".format(iResname, iResid)
# Utils.printflush('Working on resid : {}'.format(resLabel))
# create a bead collection
ridBeadCollection = BC.BeadCollection("{}_bead".format(resLabel),arg_hostDataContainer)
ridBeadCollection.listOfBeads = []
# add UA beads to it (a heavy atom and its bonded hydrogens make a bead)
resSel = allSel.select_atoms(f"resid {iResid}")
a1Idx = resSel.select_atoms(f"name {arg_axis_list[0]}").indices[0]
a2Idx = resSel.select_atoms(f"name {arg_axis_list[1]}").indices[0]
a3Idx = resSel.select_atoms(f"name {arg_axis_list[2]}").indices[0]
resHeavySel = resSel.select_atoms(f"not name H*")
for iheavy in resHeavySel.indices:
# GRP := (a heavy atom and its bonded hydrogens make a bead)
igrp = allSel.select_atoms(f"index {iheavy} or (name H* and bonded index {iheavy})")
# heavy atom name
iName = allSel.atoms.names[iheavy]
# create a bead
newBead = BC.Bead(arg_atomList=igrp.indices,\
arg_hostDataContainer=arg_hostDataContainer,\
arg_numFrames=numFrames,\
arg_beadName = iName,\
arg_beadResi = iResid,\
arg_beadResn = iResname,\
arg_beadChid = "X")
newBead.position = arg_hostDataContainer._labCoords[0, iheavy]
ridBeadCollection.listOfBeads.append(newBead)
# by this point, the UA beads for that residue have been created
# Utils.printflush('Total number of UA beads in residue {} : {}'\
# .format(resLabel, len(ridBeadCollection.listOfBeads)))
# reset weighted vectors for each bead
for iBead in ridBeadCollection.listOfBeads:
iBead.reset_totalWeightedVectors( (numFrames,3) )
# resetting all the F-T combo matrices to zero
ridBeadCollection.reinitialize_matrices()
# setup Translation and Rotation axes
# Translation axes : each atom is in the c-ca-n axes of its host residue
# Utils.printflush("Assigning Translation Axes at the UA level->", end = ' ')
for iFrame in range(numFrames):
a1Position = arg_hostDataContainer._labCoords[iFrame,a1Idx]
a2Position = arg_hostDataContainer._labCoords[iFrame,a2Idx]
a3Position = arg_hostDataContainer._labCoords[iFrame,a3Idx]
tAxes, tOrigin = GF.generate_orthonormal_axes_system(arg_coord1 = a1Position, \
arg_coord2 = a2Position, \
arg_coord3 = a3Position)
arg_hostDataContainer.update_translationAxesArray_at(iFrame, resSel.indices, tAxes, tOrigin)
# Utils.printflush('Done')
# Utils.printflush("Assigning Rotational Axes at the UA level->", end = ' ')
# Rotation axes :
# the axes will have the geometry of a
# local spherical-polar coordinate system
# assigned locally to each UA bead.
# See Chakravorty et al. 2020 for the math behind it.
for iBead in ridBeadCollection.listOfBeads:
# fetch its heavy atom
iheavy = list(filter(lambda idx: idx in heavyAtomArray, iBead.atomList))
try:
# check that there is exactly one heavy atom in the bead
assert(len(iheavy) == 1)
except AssertionError:
raise ValueError(f"An united atom bead cannot have more than one heavy atom. {len(iheavy)} found.")
iheavy = iheavy[0]
for iFrame in range(numFrames):
# from each of the hydrogen atoms bonded to it
# get the average position lab coordinate
avgHydrogenPosition = get_avg_hpos(arg_atom= iheavy, \
arg_frame = iFrame, \
arg_selector = arg_selector, \
arg_hostDataContainer = arg_hostDataContainer)
# use the resultant vector to generate an
# orthogonal local coordinate axes system
# with origin at the heavy atom position
heavyOrigin = arg_hostDataContainer._labCoords[iFrame, iheavy]
iAtomBasis = GF.get_sphCoord_axes(arg_r=avgHydrogenPosition)
arg_hostDataContainer.update_rotationAxesArray_at(arg_frame = iFrame, \
arg_atomList = iBead.atomList, \
arg_pAxes = iAtomBasis, \
arg_orig = heavyOrigin)
arg_hostDataContainer.update_localCoords("R", iBead.atomList)
# Utils.printflush('Done')
# update local forces
# Utils.printflush('Updating Local forces->',end=' ')
arg_hostDataContainer.update_localForces("T", resSel.indices)
# Utils.printflush('Done')
# update torques using the local rotational axes
# Utils.printflush('Updating Local torques->', end = ' ')
for iAtom_in_rid in resSel.indices:
for iFrame in range(numFrames):
coords_i = arg_hostDataContainer.localCoords[iFrame, iAtom_in_rid]
forces_i = arg_hostDataContainer.rotationAxesArray[iFrame, iAtom_in_rid][0:3,]@arg_hostDataContainer._labForces[iFrame,iAtom_in_rid]
arg_hostDataContainer.localTorques[iFrame,iAtom_in_rid,:] = CF.cross_product(coords_i,forces_i)
# Utils.printflush('Done')
# mass weighting the forces and torque
# Utils.printflush('Weighting forces and torques->', end = ' ')
for iBead in ridBeadCollection.listOfBeads:
for iFrame in range(numFrames):
# mass weighting the forces for each bead (iBead) in each direction (j)
# inertia weighting the torques for each bead (iBead) in each direction (j)
for iAtom in iBead.atomList:
iBead.totalWeightedForces[iFrame] += arg_hostDataContainer.localForces[iFrame, iAtom]
iBead.totalWeightedTorques[iFrame] += arg_hostDataContainer.localTorques[iFrame, iAtom]
iBead.totalWeightedForces[iFrame] /= nmp.sqrt(iBead.get_total_mass())
# define local basis as the rotationalAxes of the first atom in the atomList of iBead
iLocalBasis = arg_hostDataContainer.rotationAxesArray[iFrame][iBead.atomList[0]]
beadMOITensor = iBead.get_moment_of_inertia_tensor_local(arg_localBasis = iLocalBasis, arg_frame = iFrame)
# get total torque and force in each direction and weight them by √beadMOITensor[jj]
for j in range(3):
try:
if nmp.isclose(iBead.totalWeightedTorques[iFrame,j] , 0.0):
# then the beadMOITensor[j,j] must be 0 as well
# ensure that
assert(nmp.isclose(beadMOITensor[j,j] , 0.0))
else:
# inertia weight the total torque component
iBead.totalWeightedTorques[iFrame,j] /= nmp.sqrt(beadMOITensor[j,j])
except AssertionError:
raise AssertionError(f"Moment of Intertia is non-zero for a bead lying on axis {j}")
# Utils.printflush('Done')
# now fill in the matrices
# Utils.printflush("Updating the submatrices ... ")
ridBeadCollection.update_subMatrix(arg_pairString="FF",arg_verbose=arg_verbose)
ridBeadCollection.update_subMatrix(arg_pairString="TT",arg_verbose=arg_verbose)
# Utils.printflush('Done')
#make quadrant from subMatrices
# Utils.printflush("Generating Quadrants->",end = ' ')
ffQuadrant = ridBeadCollection.generate_quadrant(arg_pairString="FF",arg_filterZeros=0)
ttQuadrant = ridBeadCollection.generate_quadrant(arg_pairString="TT",arg_filterZeros=0)
# Utils.printflush("Done")
# scale forces/torques of these quadrants
ffQuadrant = nmp.multiply(arg_fScale**2, ffQuadrant)
ttQuadrant = nmp.multiply(arg_tScale**2, ttQuadrant)
# remove any row or column with zero axis
# this could have been done while generating quadrants. Can be merged if wished for
ffQuadrant = ridBeadCollection.filter_zero_rows_columns(ffQuadrant)
ttQuadrant = ridBeadCollection.filter_zero_rows_columns(ttQuadrant)
# print matrices if asked
if arg_moutFile:
Writer.write_a_matrix(arg_matrix = ffQuadrant\
, arg_descriptor = "FF COV AT UNITED ATOM LEVEL FOR RES {}".format(resLabel)\
, arg_outFile = arg_moutFile)
Writer.write_a_matrix(arg_matrix = ttQuadrant\
, arg_descriptor = "TT COV AT UNITED ATOM LEVEL FOR RES {}".format(resLabel)\
, arg_outFile = arg_moutFile)
# diagonalize
# Utils.printflush("Diagonalizing->", end = ' ')
lambdasFF, eigVectorsFF = Utils.diagonalize(ffQuadrant)
lambdasTT, eigVectorsTT = Utils.diagonalize(ttQuadrant)
# Utils.printflush('Done')
# since eigen values can be complex numbers
# but with imag parts very close to zero
# use numpy's real_if_close with some tolerance to mask the imag parts
# Utils.printflush('Checking the nature of eigen values and conditioning them ...', end = ' ')
# tol = 1e+5
# lambdasFF = nmp.real_if_close(lambdasFF/1e+5, tol= tol)
# lambdasTT = nmp.real_if_close(lambdasTT/1e+5, tol= tol)
# Utils.printflush('Done')
# filter real zero values
lambdasFF = nmp.asarray([lm for lm in lambdasFF if not nmp.isclose(lm, 0.0)])
lambdasTT = nmp.asarray([lm for lm in lambdasTT if not nmp.isclose(lm, 0.0)])
# change to SI units
# Utils.printflush('Changing the units of eigen values to SI units->', end = ' ')
lambdasFF = UAC.change_lambda_units(lambdasFF)
lambdasTT = UAC.change_lambda_units(lambdasTT)
# Utils.printflush('Done')
# Create a spectrum to store these modes for
# proper output and analyses.
modeSpectraFF = []
for midx, mcombo in enumerate(zip(lambdasFF, eigVectorsFF)):
fflmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(fflmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = fflmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(fflmb, arg_temper)
modeSpectraFF.append(newMode)
ridBeadCollection.assign_attribute("modeSpectraFF", modeSpectraFF)
modeSpectraTT = []
for midx, mcombo in enumerate(zip(lambdasTT, eigVectorsTT)):
ttlmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(ttlmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = ttlmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(ttlmb, arg_temper)
modeSpectraTT.append(newMode)
ridBeadCollection.assign_attribute("modeSpectraTT", modeSpectraTT)
# sorting the spectrum
# Utils.printflush('Sorting spectrum in ascending order of frequencies->', end = ' ')
ridBeadCollection.modeSpectraFF = ModeClasses.sort_modes(ridBeadCollection.modeSpectraFF)
ridBeadCollection.modeSpectraTT = ModeClasses.sort_modes(ridBeadCollection.modeSpectraTT)
# Utils.printflush('Done')
# Print modes if asked
if arg_nmdFile:
Writer.append_file(arg_nmdFile)
ridBeadCollection.write_nmd_file(arg_nmdfile = arg_nmdFile, \
arg_spectrum = ridBeadCollection.modeSpectraFF, \
arg_wfac = [iBead.get_total_mass() for iBead in ridBeadCollection.listOfBeads])
# compute entropy
# 1. remove the smallest 6 freqs from the FF spectrum
# because they may be overlapping with residue level motions
# 2. DO NOT remove any freq from TT spectrum because
# they are uncoupled to any TT freq in any other hierarchy
entropyFF = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in ridBeadCollection.modeSpectraFF[6:]]
entropyTT = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in ridBeadCollection.modeSpectraTT[0:]]
ridTotalEntropyFF = nmp.sum(entropyFF)
ridTotalEntropyTT = nmp.sum(entropyTT)
# print final outputs
# Utils.printflush("Entropy values:")
# Utils.printflush('{:<40s} : {:.4f} J/mol/K'.format('FF Entropy (UA for {})'.format(resLabel), ridTotalEntropyFF))
# Utils.printflush('{:<40s} : {:.4f} J/mol/K'.format('TT Entropy (UA for {})'.format(resLabel), ridTotalEntropyTT))
# dataframe here
# Utils.printOut(arg_outFile,'UATOM {:<10}{:>5}{:>12.3f}{:>12.3f}'\
# .format(iResname\
# , iResid\
# , ridTotalEntropyFF\
# , ridTotalEntropyTT))
# newRowSolvent = pd.DataFrame({'RESNAME': iResname,
# 'RESID':iResid,
# 'FF_ENTROPY': ridTotalEntropyFF,
# 'TT_ENTROPY': ridTotalEntropyTT}, index=[0])
Utils.printflush("\n\n")
return (iResname, iResid, ridTotalEntropyFF, ridTotalEntropyTT)
def compute_entropy_UA_level_multiprocess(arg_hostDataContainer,
arg_outFile,
arg_selector = "all",
arg_moutFile = None,
arg_nmdFile = None,
arg_fScale = 1.0,
arg_tScale = 1.0,
arg_temper = 300.0,
arg_verbose = 3,
arg_csv_out = None,
arg_axis_list = ['C', 'CA', 'N'],
arg_thread = 4):
"""
!! This uses multiprocessing to spread the workload across cores and speed up the calculation.
However, this means prints and file output will not appear in sequential order.
Computes the entropy at the united atom (UA) level.
Each heavy atom with its covalently bonded H-atoms makes a single bead; H-atoms
are, however, treated explicitly. Determining translation and rotation axes is
part of the function. The translation axes for each bead are the C-CA-N axes of the
residue the bead is part of. The rotation axes form a basis whose axes are directed
along spherical-coordinate unit vectors along r, θ and Φ.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_outFile (str): path to an output file; output is written in append mode
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_moutFile (str, optional): print matrices if path to a matrices out file is not None. Defaults to None.
arg_nmdFile (str, optional): print modespectra if path to a spectra out file is not None. Defaults to None.
arg_fScale (float, optional): Force scale. Defaults to 1.0.
arg_tScale (float, optional): Torque scale. Defaults to 1.0.
arg_temper (float, optional): temperature in K. Defaults to 300.0.
arg_verbose (int, optional): verbose level from 1-5. Defaults to 3.
arg_csv_out (str, optional): print entropy of each residue as sorted dataframe if path to a csv out file is not None. Defaults to None.
arg_thread (int, optional): number of processes to spawn for parallelization. Defaults to 4.
arg_axis_list (list, optional): the atom names defining the rotational axes of each residue. Defaults to ['C', 'CA', 'N'].
Returns:
tuple of floats:
entropyFF (float): United atom level FF Entropy in J/mol/K
entropyTT (float): United atom level TT Entropy in J/mol/K
"""
# Utils.hbar(60)
# Utils.printflush("{:^60}".format("Hierarchy level. --> United Atom <--"))
# Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Hierarchy level. --> United Atom <-- parallel mode, log disabled"))
Utils.printOut(arg_outFile,'-'*60)
# Select Scope
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# preparing header for output file
if arg_outFile != None:
Utils.printOut(arg_outFile,f" {'RESNAME':<10s}{'RESID':>5s}{'FF_ENTROPY':>12s}{'TT_ENTROPY':>12s}")
# initialize total entropy values
totalUAEntropyFF = 0.
totalUAEntropyTT = 0.
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
#reset
arg_hostDataContainer.reset_rotationAxesArray()
arg_hostDataContainer.reset_translationAxesArray()
#get the heavy Atom List for filtering
heavyAtomArray = allSel.select_atoms("not name H*").indices
pool = mp.Pool(arg_thread)
f = partial(UA_residue_protein, allSel, arg_hostDataContainer, numFrames, heavyAtomArray, arg_fScale, arg_tScale, arg_temper, arg_outFile, arg_selector, arg_verbose, arg_moutFile, arg_nmdFile, arg_axis_list)
items = allSel.residues.resindices
result = pool.map(f, items)
pool.close()
pool.join()
result_df = pd.DataFrame(result, columns=['RESNAME', 'RESID', 'FF_ENTROPY(J/mol/K)', 'TT_ENTROPY(J/mol/K)'])
result_df = result_df.sort_values('RESID')
print(result_df)
totalUAEntropyFF = result_df['FF_ENTROPY(J/mol/K)'].sum()
totalUAEntropyTT = result_df['TT_ENTROPY(J/mol/K)'].sum()
# Final information
# Utils.hbar(60)
# Utils.printflush(f"{'Total Entropy FF (UA level)':<25} : {totalUAEntropyFF:>15.3f} J/mol/K")
# Utils.printflush(f"{'Total Entropy TT (UA level)':<25} : {totalUAEntropyTT:>15.3f} J/mol/K")
# Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'_'*60)
Utils.printOut(arg_outFile,f"{'Total Entropy FF (UA level)':<25} : {totalUAEntropyFF:>15.3f} J/mol/K")
Utils.printOut(arg_outFile,f"{'Total Entropy TT (UA level)':<25} : {totalUAEntropyTT:>15.3f} J/mol/K")
Utils.printOut(arg_outFile,'-'*60)
if arg_csv_out != None:
result_df.to_csv(arg_csv_out, index=False)
return (totalUAEntropyFF, totalUAEntropyTT, result_df)
#END
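# A hedged, commented usage sketch for the parallel route. The container name
# `dataContainer` and the file paths are illustrative assumptions, not objects
# defined in this module:
#
#   ffS, ttS, perResidue = compute_entropy_UA_level_multiprocess(
#       arg_hostDataContainer = dataContainer,
#       arg_outFile = "ua_entropy_parallel.out",
#       arg_selector = "protein",
#       arg_temper = 300.0,
#       arg_csv_out = "ua_entropy_parallel.csv",
#       arg_thread = 4)
#
# Because residues are mapped over a process pool (mp.Pool + functools.partial),
# per-residue log lines interleave; only the returned, RESID-sorted dataframe
# is guaranteed to be in order.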
def compute_entropy_UA_level(arg_hostDataContainer,
arg_outFile,
arg_selector = "all",
arg_moutFile = None,
arg_nmdFile = None,
arg_fScale = 1.0,
arg_tScale = 1.0,
arg_temper = 300.0,
arg_csv_out = None,
arg_axis_list = ['C', 'CA', 'N'],
arg_verbose = 3):
"""
Computes the entropy at the united atom (UA) level.
Each heavy atom together with its covalently bonded H-atoms makes a single bead;
the H-atoms are nevertheless treated explicitly. Determining translation and rotation
axes is part of the function. The translation axes for each bead are the C-CA-N axes
of the residue the bead belongs to. The rotation axes form a local basis directed
along the spherical-coordinate unit vectors r, θ and Φ.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_outFile (str): path to an output file; output is written in append mode
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_moutFile (str, optional): print matrices if the path to a matrix output file is not None. Defaults to None.
arg_nmdFile (str, optional): print mode spectra if the path to a spectra output file is not None. Defaults to None.
arg_fScale (float, optional): Force scale. Defaults to 1.0.
arg_tScale (float, optional): Torque scale. Defaults to 1.0.
arg_temper (float, optional): temperature in K. Defaults to 300.0.
arg_csv_out (str, optional): print the entropy of each residue as a sorted dataframe if the path to a CSV output file is not None. Defaults to None.
arg_axis_list (list, optional): the atom names defining the rotational axes of each residue. Defaults to ['C', 'CA', 'N'].
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
tuple: (totalUAEntropyFF, totalUAEntropyTT, result_df) where
totalUAEntropyFF (float) is the united-atom level FF entropy in J/mol/K,
totalUAEntropyTT (float) is the united-atom level TT entropy in J/mol/K,
and result_df (pandas.DataFrame) holds the per-residue values.
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Hierarchy level. --> United Atom <--"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Hierarchy level. --> United Atom <--"))
Utils.printOut(arg_outFile,'-'*60)
# Select Scope
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# preparing header for output file
if arg_outFile != None:
Utils.printOut(arg_outFile,f" {'RESNAME':<10s}{'RESID':>5s}{'FF_ENTROPY':>12s}{'TT_ENTROPY':>12s}")
# initialize total entropy values
totalUAEntropyFF = 0.
totalUAEntropyTT = 0.
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
#reset
arg_hostDataContainer.reset_rotationAxesArray()
arg_hostDataContainer.reset_translationAxesArray()
#get the heavy Atom List for filtering
heavyAtomArray = allSel.select_atoms("not name H*").indices
result = []
# for each residue:
for resindices in allSel.residues.resindices:
iResname = arg_hostDataContainer.universe.residues.resnames[resindices]
iResid = arg_hostDataContainer.universe.residues.resids[resindices]
resLabel = "{}{}".format(iResname, iResid)
Utils.printflush('Working on resid : {}'.format(resLabel))
# create a bead collection
ridBeadCollection = BC.BeadCollection("{}_bead".format(resLabel),arg_hostDataContainer)
ridBeadCollection.listOfBeads = []
# add UA beads to it (a heavy atom and its bonded hydrogens make a bead)
resSel = allSel.select_atoms(f"resid {iResid}")
a1Idx = resSel.select_atoms(f"name {arg_axis_list[0]}").indices[0]
a2Idx = resSel.select_atoms(f"name {arg_axis_list[1]}").indices[0]
a3Idx = resSel.select_atoms(f"name {arg_axis_list[2]}").indices[0]
resHeavySel = resSel.select_atoms(f"not name H*")
for iheavy in resHeavySel.indices:
# GRP := (a heavy atom and its bonded hydrogens make a bead)
igrp = allSel.select_atoms(f"index {iheavy} or (name H* and bonded index {iheavy})")
# heavy atom name
iName = allSel.atoms.names[iheavy]
# create a bead
newBead = BC.Bead(arg_atomList=igrp.indices,\
arg_hostDataContainer=arg_hostDataContainer,\
arg_numFrames=numFrames,\
arg_beadName = iName,\
arg_beadResi = iResid,\
arg_beadResn = iResname,\
arg_beadChid = "X")
newBead.position = arg_hostDataContainer._labCoords[0, iheavy]
ridBeadCollection.listOfBeads.append(newBead)
# by this point, the UA beads for that residue have been created
Utils.printflush('Total number of UA beads in residue {} : {}'\
.format(resLabel, len(ridBeadCollection.listOfBeads)))
# reset weighted vectors for each bead
for iBead in ridBeadCollection.listOfBeads:
iBead.reset_totalWeightedVectors( (numFrames,3) )
# reseting all the F-T combo matrices to zero
ridBeadCollection.reinitialize_matrices()
# setup Translation and Rotation axes
# Translation axes : each atom uses the C-CA-N axes of its host residue
Utils.printflush("Assigning Translation Axes at the UA level->", end = ' ')
for iFrame in range(numFrames):
a1Position = arg_hostDataContainer._labCoords[iFrame,a1Idx]
a2Position = arg_hostDataContainer._labCoords[iFrame,a2Idx]
a3Position = arg_hostDataContainer._labCoords[iFrame,a3Idx]
tAxes, tOrigin = GF.generate_orthonormal_axes_system(arg_coord1 = a1Position, \
arg_coord2 = a2Position, \
arg_coord3 = a3Position)
arg_hostDataContainer.update_translationAxesArray_at(iFrame, resSel.indices, tAxes, tOrigin)
Utils.printflush('Done')
Utils.printflush("Assigning Rotational Axes at the UA level->", end = ' ')
# Rotation axes :
# the axes will have the geometry of a
# local spherical-polar coordinate system
# assigned locally to each UA bead.
# See Chakravorty et. al. 2020 on the math behind it.
for iBead in ridBeadCollection.listOfBeads:
# fetch its heavy atom
iheavy = list(filter(lambda idx: idx in heavyAtomArray, iBead.atomList))
try:
# check that there is exactly one heavy atom in the bead
assert(len(iheavy) == 1)
except AssertionError:
raise ValueError(f"A united atom bead must contain exactly one heavy atom; {len(iheavy)} found.")
iheavy = iheavy[0]
for iFrame in range(numFrames):
# from each of the hydrogen atoms bonded to it
# get the average position lab coordinate
avgHydrogenPosition = get_avg_hpos(arg_atom= iheavy, \
arg_frame = iFrame, \
arg_selector = arg_selector, \
arg_hostDataContainer = arg_hostDataContainer)
# use the resultant vector to generate an
# orthogonal local coordinate axes system
# with origin at the heavy atom position
heavyOrigin = arg_hostDataContainer._labCoords[iFrame, iheavy]
iAtomBasis = GF.get_sphCoord_axes(arg_r=avgHydrogenPosition)
arg_hostDataContainer.update_rotationAxesArray_at(arg_frame = iFrame, \
arg_atomList = iBead.atomList, \
arg_pAxes = iAtomBasis, \
arg_orig = heavyOrigin)
arg_hostDataContainer.update_localCoords("R", iBead.atomList)
Utils.printflush('Done')
# update local forces
Utils.printflush('Updating Local forces->',end=' ')
arg_hostDataContainer.update_localForces("T", resSel.indices)
Utils.printflush('Done')
# update torques using the local rotational axes
Utils.printflush('Updating Local torques->', end = ' ')
for iAtom_in_rid in resSel.indices:
for iFrame in range(numFrames):
coords_i = arg_hostDataContainer.localCoords[iFrame, iAtom_in_rid]
forces_i = arg_hostDataContainer.rotationAxesArray[iFrame, iAtom_in_rid][0:3,]@arg_hostDataContainer._labForces[iFrame,iAtom_in_rid]
arg_hostDataContainer.localTorques[iFrame,iAtom_in_rid,:] = CF.cross_product(coords_i,forces_i)
Utils.printflush('Done')
# mass weighting the forces and torque
Utils.printflush('Weighting forces and torques->', end = ' ')
for iBead in ridBeadCollection.listOfBeads:
for iFrame in range(numFrames):
# mass weighting the forces for each bead (iBead) in each direction (j)
# inertia weighting the torques for each bead (iBead) in each direction (j)
for iAtom in iBead.atomList:
iBead.totalWeightedForces[iFrame] += arg_hostDataContainer.localForces[iFrame, iAtom]
iBead.totalWeightedTorques[iFrame] += arg_hostDataContainer.localTorques[iFrame, iAtom]
iBead.totalWeightedForces[iFrame] /= nmp.sqrt(iBead.get_total_mass())
# define local basis as the rotationalAxes of the first atom in the atomList of iBead
iLocalBasis = arg_hostDataContainer.rotationAxesArray[iFrame][iBead.atomList[0]]
beadMOITensor = iBead.get_moment_of_inertia_tensor_local(arg_localBasis = iLocalBasis, arg_frame = iFrame)
# get total torque and force in each direction and weight them by √beadMOITensor[jj]
for j in range(3):
try:
if nmp.isclose(iBead.totalWeightedTorques[iFrame,j] , 0.0):
# then the beadMOITensor[j,j] must be 0 as well
# ensure that
assert(nmp.isclose(beadMOITensor[j,j] , 0.0))
else:
# inertia weight the total torque component
iBead.totalWeightedTorques[iFrame,j] /= nmp.sqrt(beadMOITensor[j,j])
except AssertionError:
raise AssertionError(f"Moment of inertia is non-zero along axis {j} for a bead whose torque component is zero")
Utils.printflush('Done')
# now fill in the matrices
Utils.printflush("Updating the submatrices ... ")
ridBeadCollection.update_subMatrix(arg_pairString="FF",arg_verbose=arg_verbose)
ridBeadCollection.update_subMatrix(arg_pairString="TT",arg_verbose=arg_verbose)
Utils.printflush('Done')
#make quadrant from subMatrices
Utils.printflush("Generating Quadrants->",end = ' ')
ffQuadrant = ridBeadCollection.generate_quadrant(arg_pairString="FF",arg_filterZeros=0)
ttQuadrant = ridBeadCollection.generate_quadrant(arg_pairString="TT",arg_filterZeros=0)
Utils.printflush("Done")
# scale forces/torques of these quadrants
ffQuadrant = nmp.multiply(arg_fScale**2, ffQuadrant)
ttQuadrant = nmp.multiply(arg_tScale**2, ttQuadrant)
# remove any row or column with zero axis
# this could have been done while generating quadrants. Can be merged if wished for
ffQuadrant = ridBeadCollection.filter_zero_rows_columns(ffQuadrant)
ttQuadrant = ridBeadCollection.filter_zero_rows_columns(ttQuadrant)
# print matrices if asked
if arg_moutFile:
Writer.write_a_matrix(arg_matrix = ffQuadrant\
, arg_descriptor = "FF COV AT UNITED ATOM LEVEL FOR RES {}".format(resLabel)\
, arg_outFile = arg_moutFile)
Writer.write_a_matrix(arg_matrix = ttQuadrant\
, arg_descriptor = "TT COV AT UNITED ATOM LEVEL FOR RES {}".format(resLabel)\
, arg_outFile = arg_moutFile)
# diagonalize
Utils.printflush("Diagonalizing->", end = ' ')
lambdasFF, eigVectorsFF = Utils.diagonalize(ffQuadrant)
lambdasTT, eigVectorsTT = Utils.diagonalize(ttQuadrant)
Utils.printflush('Done')
# since eigen values can be complex numbers
# but with imag parts very close to zero
# use numpy's real_if_close with some tolerance to mask the imag parts
# Utils.printflush('Checking the nature of eigen values and conditioning them ...', end = ' ')
# tol = 1e+5
# lambdasFF = nmp.real_if_close(lambdasFF/1e+5, tol= tol)
# lambdasTT = nmp.real_if_close(lambdasTT/1e+5, tol= tol)
# Utils.printflush('Done')
# filter real zero values
lambdasFF = nmp.asarray([lm for lm in lambdasFF if not nmp.isclose(lm, 0.0)])
lambdasTT = nmp.asarray([lm for lm in lambdasTT if not nmp.isclose(lm, 0.0)])
# change to SI units
Utils.printflush('Changing the units of eigen values to SI units->', end = ' ')
lambdasFF = UAC.change_lambda_units(lambdasFF)
lambdasTT = UAC.change_lambda_units(lambdasTT)
Utils.printflush('Done')
# Create a spectrum to store these modes for
# proper output and analyses.
modeSpectraFF = []
for midx, mcombo in enumerate(zip(lambdasFF, eigVectorsFF)):
fflmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(fflmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = fflmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(fflmb, arg_temper)
modeSpectraFF.append(newMode)
ridBeadCollection.assign_attribute("modeSpectraFF", modeSpectraFF)
modeSpectraTT = []
for midx, mcombo in enumerate(zip(lambdasTT, eigVectorsTT)):
ttlmb, evec = mcombo
# compute mode frequencies
# nu = sqrt(lambda/kT)*(1/2pi)
# Units: 1/s
mfreq = compute_frequency_from_lambda(ttlmb, arg_temper)
newMode = ModeClasses.Mode(arg_modeIdx = midx + 1, \
arg_modeEval = ttlmb, \
arg_modeEvec = evec, \
arg_modeFreq = mfreq)
newMode.modeAmpl = compute_ampfac_from_lambda(ttlmb, arg_temper)
modeSpectraTT.append(newMode)
ridBeadCollection.assign_attribute("modeSpectraTT", modeSpectraTT)
# sorting the spectrum
Utils.printflush('Sorting spectrum in ascending order of frequencies->', end = ' ')
ridBeadCollection.modeSpectraFF = ModeClasses.sort_modes(ridBeadCollection.modeSpectraFF)
ridBeadCollection.modeSpectraTT = ModeClasses.sort_modes(ridBeadCollection.modeSpectraTT)
Utils.printflush('Done')
# Print modes if asked
if arg_nmdFile:
Writer.append_file(arg_nmdFile)
ridBeadCollection.write_nmd_file(arg_nmdfile = arg_nmdFile, \
arg_spectrum = ridBeadCollection.modeSpectraFF, \
arg_wfac = [iBead.get_total_mass() for iBead in ridBeadCollection.listOfBeads])
# compute entropy
# 1. remove the smallest 6 freqs from the FF spectrum
# because they may be overlapping with residue level motions
# 2. DO NOT remove any freq from TT spectrum because
# they are uncoupled to any TT freq in any other hierarchy
entropyFF = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in ridBeadCollection.modeSpectraFF[6:]]
entropyTT = [calculate_entropy_per_dof(m.modeFreq, arg_temper) for m in ridBeadCollection.modeSpectraTT[0:]]
ridTotalEntropyFF = nmp.sum(entropyFF)
ridTotalEntropyTT = nmp.sum(entropyTT)
# print final outputs
Utils.printflush("Entropy values:")
Utils.printflush('{:<40s} : {:.4f} J/mol/K'.format('FF Entropy (UA for {})'.format(resLabel), ridTotalEntropyFF))
Utils.printflush('{:<40s} : {:.4f} J/mol/K'.format('TT Entropy (UA for {})'.format(resLabel), ridTotalEntropyTT))
if arg_outFile != None:
Utils.printOut(arg_outFile,'UATOM {:<10}{:>5}{:>12.3f}{:>12.3f}'\
.format(iResname\
, iResid\
, ridTotalEntropyFF\
, ridTotalEntropyTT))
Utils.printflush("\n\n")
result.append([iResname, iResid, ridTotalEntropyFF, ridTotalEntropyTT])
totalUAEntropyFF += ridTotalEntropyFF
totalUAEntropyTT += ridTotalEntropyTT
result_df = pd.DataFrame(result, columns=['RESNAME', 'RESID', 'FF_ENTROPY(J/mol/K)', 'TT_ENTROPY(J/mol/K)'])
# Final information
Utils.hbar(60)
Utils.printflush(f"{'Total Entropy FF (UA level)':<25} : {totalUAEntropyFF:>15.3f} J/mol/K")
Utils.printflush(f"{'Total Entropy TT (UA level)':<25} : {totalUAEntropyTT:>15.3f} J/mol/K")
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'_'*60)
Utils.printOut(arg_outFile,f"{'Total Entropy FF (UA level)':<25} : {totalUAEntropyFF:>15.3f} J/mol/K")
Utils.printOut(arg_outFile,f"{'Total Entropy TT (UA level)':<25} : {totalUAEntropyTT:>15.3f} J/mol/K")
Utils.printOut(arg_outFile,'-'*60)
if arg_csv_out != None:
result_df.to_csv(arg_csv_out, index=False)
return (totalUAEntropyFF, totalUAEntropyTT, result_df)
#END
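# A hedged usage sketch for the serial variant (same illustrative names as for
# the parallel sketch above; `dataContainer` is an assumption):
#
#   ffS, ttS, perResidue = compute_entropy_UA_level(
#       arg_hostDataContainer = dataContainer,
#       arg_outFile = "ua_entropy.out",
#       arg_selector = "protein",
#       arg_axis_list = ['C', 'CA', 'N'],
#       arg_temper = 300.0)
#
# Unlike the multiprocess version, the serial loop prints per-residue progress
# in residue order.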
def compute_topographical_entropy0_SC(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
"""A code that computes the topographical entropy using the formula S = -Sum(pLog(p)).
Every SC dihedral from every residue will be scanned.
Each dihedral will be depicted using a vector of order 3 of the form |g+, g-, t> (arbitrarily chosen) and
so can have a maximum of three different configurations it can be in. Its probability of being in each of
these states will be computed and entropy will be coputed form that.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Total SideChain Topog. Entropy
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy of residue side chains \n computed using all the dihedrals with pLogp formalism"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy of residue side chains \n computed using all the dihedrals with pLogp formalism"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# log of number of frames (a constant)
logNumFrames = nmp.log(numFrames)
# conformation vector order |g+, g-, t>
vecOrder = 3
# total SC entropy
totalTopogEntropySC = 0.
# browse through each residue in the system and get their dihedrals
for resindices in allSel.residues.resindices:
Utils.printflush('-'*10,end='')
Utils.printflush('Working on resid : {} ({})'.format(arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]), end='')
Utils.printflush('-'*10)
resid = arg_hostDataContainer.universe.residues.resids[resindices]
# total SC entropy at the topographical level of this residue
ridTopogEntropy = 0.
diheds_in_rid = set()
iAtom_in_rid = nmp.flip(allSel.select_atoms(f"resid {resid}").atoms.indices)
for idx in iAtom_in_rid:
for iDih in arg_hostDataContainer.dihedralTable[idx]:
# see if it is exclusive to this resid because they could also be peptide bond diheds
if iDih.is_from_same_residue() == resid and (iDih.is_heavy()) and (not iDih.is_BB_dihedral()):
diheds_in_rid.add(iDih)
Utils.printflush('Found {} exclusive dihedrals in residue {}'.format(len(diheds_in_rid), arg_hostDataContainer.universe.residues.resnames[resindices]))
# define a list of ConformationEntities for this residue
conformationEntityList = []
for iDih in diheds_in_rid:
dihAtoms = {"atom1": iDih.atom1,
"atom2": iDih.atom2,
"atom3": iDih.atom3,
"atom4": iDih.atom4,
"isBB" : iDih.is_BB_dihedral(),
"isHeavy" : iDih.is_heavy(),
"isSameRes" : iDih.is_from_same_residue()}
# make an entity from this dihedral
newEntity = CONF.ConformationEntity(arg_order = vecOrder, arg_numFrames = numFrames, **dihAtoms)
# generate a time series of the conformations it acquires.
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value at that frame
phi = iDih.get_dihedral_angle_lab(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
# place it in the time series block appropriately
newEntity.timeSeries[:,iFrame] = nmp.asarray([isGaucheP, isGaucheN, isTrans]).astype(int)
# add this dihedral into the list of conformation entities
conformationEntityList.append(newEntity)
# go over each entity and find its entropy. Add its entropy to the total entropy.
for iEntity in conformationEntityList:
sEntity = 0.
for iRow in range(vecOrder):
# get the total number of occurrences of '1' in that row (count)
iCount = nmp.sum(iEntity.timeSeries[iRow,:])
if iCount != 0:
# means that state was attained at least once
# p Log(p) for this state
iPlogP = iCount * (nmp.log(iCount) - logNumFrames)
sEntity += iPlogP
sEntity /= numFrames
sEntity *= -CONST.GAS_CONST #(R)
# add entropy of this entity to the residue's SC topographical entropy
ridTopogEntropy += sEntity
Utils.printflush('Dihedral {:<5d}{:<5d}{:<5d}{:<5d} : {:.4f}'.format(iEntity.atom1, iEntity.atom2, iEntity.atom3, iEntity.atom4, sEntity))
if arg_outFile != None:
Utils.printOut(arg_outFile, 'Dihedral {:<5d}{:<5d}{:<5d}{:<5d} : {:.4f}'.format(iEntity.atom1, iEntity.atom2, iEntity.atom3, iEntity.atom4, sEntity))
# Final residue SC information
Utils.printflush('{:<40s} : {:.4f}'.format('Side Chain Topographical Entropy ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
if arg_outFile != None:
Utils.printOut(arg_outFile, '{:<40s} : {:.4f}'.format('Side Chain Topographical Entropy ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
# add this residue's SC entropy to the total SC entropy
totalTopogEntropySC += ridTopogEntropy
# total SC topographical entropy
Utils.hbar(60)
Utils.printflush('{:<40} : {:>15.3f}'.format('Total SC Topog. Entropy ', totalTopogEntropySC))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '_'*60)
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total SC Topog. Entropy ', totalTopogEntropySC))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropySC
#END
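# The per-dihedral entropy accumulated in the loop above is a plain pLogP over
# per-state occupancy counts. A minimal, self-contained sketch; the function
# name is illustrative and R is hard-coded here as a stand-in for CONST.GAS_CONST:
def _plogp_entropy_sketch(stateCounts, numFrames, R_GAS=8.314462618):
    """Return S = -R * sum_i p_i * ln(p_i) with p_i = c_i / numFrames, in J/mol/K."""
    import numpy as np
    counts = np.asarray([c for c in stateCounts if c > 0], dtype=float)
    probs = counts / numFrames
    return -R_GAS * float(np.sum(probs * np.log(probs)))
# e.g. a dihedral seen 60 frames in g+ and 40 frames in t out of 100 frames:
# _plogp_entropy_sketch([60, 0, 40], 100)  ->  ~5.60 J/mol/K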
def compute_topographical_entropy0_BB(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
""" A code that computes the topographical entropy using the formula S = -Sum(pLog(p)).
Every BB dihedral from the protein will be scanned.
Each dihedral will be depicted using a vector of order 3 of the form |g+, g-, t> (arbitrarily chosen) and
so can have a maximum of three different configurations it can be in. Its probability of being in each of
these states will be computed and entropy will be coputed form that.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Total Backbone Topog. Entropy
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy of BB dihedrals \n computed using the pLogp formalism"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy of BB dihedrals \n computed using the pLogp formalism"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# log of number of frames (a constant)
logNumFrames = nmp.log(numFrames)
# conformation vector order |g+, g-, t>
vecOrder = 3
# total BB entropy
totalTopogEntropyBB = 0.
# fetch all the heavy BB dihedrals
bbDiheds = list(filter(lambda dih: dih.is_BB_dihedral() and dih.is_heavy(), arg_hostDataContainer.dihedralArray))
Utils.printflush('Found a total of {} BB dihedrals.'.format(len(bbDiheds)))
# define a list of ConformationEntities to store all the BB dihedrals
conformationEntityList = []
for iBBDih in bbDiheds:
dihAtoms = {"atom1": iBBDih.atom1,
"atom2": iBBDih.atom2,
"atom3": iBBDih.atom3,
"atom4": iBBDih.atom4,
"isBB" : iBBDih.is_BB_dihedral(),
"isHeavy" : iBBDih.is_heavy(),
"isSameRes" : iBBDih.is_from_same_residue()}
# make an entity from this dihedral
newEntity = CONF.ConformationEntity(arg_order = vecOrder, arg_numFrames = numFrames, **dihAtoms)
# generate a time series of the conformations it acquires.
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value at that frame
phi = iBBDih.get_dihedral_angle_lab(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
# create an instance of ConformationVector
v = nmp.asarray([isGaucheP, isGaucheN, isTrans]).astype(int)
# place it in the time series block appropriately
newEntity.timeSeries[:,iFrame] = v
# add this dihedral into the list of conformation entities
conformationEntityList.append(newEntity)
# go over each entity and find its entropy. Add its entropy to the total BB topographical entropy.
for iEntity in conformationEntityList:
sEntity = 0.
for iRow in range(vecOrder):
# get the total number of occurrences of '1' in that row (count)
iCount = nmp.sum(iEntity.timeSeries[iRow,:])
if iCount != 0:
# means that state was attained at least once
# p Log(p) for this state
iPlogP = iCount * (nmp.log(iCount) - logNumFrames)
sEntity += iPlogP
sEntity /= numFrames
sEntity *= -CONST.GAS_CONST #(R)
Utils.printflush('Dihedral {:<5d}{:<5d}{:<5d}{:<5d} : {:.4f} ({:>5d})'.format(iEntity.atom1, iEntity.atom2, iEntity.atom3, iEntity.atom4, sEntity, iEntity.isSameRes))
if arg_outFile != None:
Utils.printOut(arg_outFile, 'Dihedral {:<5d}{:<5d}{:<5d}{:<5d} : {:.4f} ({:>5d})'.format(iEntity.atom1, iEntity.atom2, iEntity.atom3, iEntity.atom4, sEntity, iEntity.isSameRes))
# add the entropy of this entity to the total BB topographical entropy
totalTopogEntropyBB += sEntity
# total BB topographical entropy
Utils.hbar(60)
Utils.printflush('{:<40} : {:>15.3f}'.format('Total BB Topog. Entropy ', totalTopogEntropyBB))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '_'*60)
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total BB Topog. Entropy ', totalTopogEntropyBB))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropyBB
#END
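# The shifted three-state binning repeated in the comment blocks above
# (-30 <= phi < 90 is g+, -150 <= phi < -30 is g-, the rest is t) can be
# captured in a tiny helper. A hedged sketch with an illustrative name;
# phi is assumed to be in degrees in [-180, 180):
def _classify_dihedral_sketch(phi):
    """Return a one-hot [g+, g-, t] state vector for a dihedral angle."""
    isGaucheP = (-30 <= phi < 90)
    isGaucheN = (-150 <= phi < -30)
    isTrans = (phi >= 90 or phi < -150)
    return [int(isGaucheP), int(isGaucheN), int(isTrans)]
# _classify_dihedral_sketch(60)  -> [1, 0, 0]  (gauche+)
# _classify_dihedral_sketch(175) -> [0, 0, 1]  (trans)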
def compute_topographical_entropy1_SC(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
""" A function that computes the entropy over the states acquired by the a residue in terms of the states acquired by
its dihedrals by also accounting for their correlated motions. A residue is depicted as a vector of length N_d where N_d
is the number of dihedrals. Each dihedral is represented using an integer which is a decimal equivalent of its state of some order Q
which is represented by a binary vector of that size. At each time frame, a vector of integers of size N_d is stored and it stores that
time frame uniquely. All the different states acquired are then used to compute the entropy using p-logP.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Total SideChain Topog. Entropy
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy of residue side chains \ncomputed using all the dihedrals with correlation/pLogp formalism"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy of residue side chains \ncomputed using all the dihedrals with correlation/pLogp formalism"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# log of number of frames (a constant)
logNumFrames = nmp.log(numFrames)
# conformation vector order |g+, g-, t>
vecOrder = 3 # (= Q)
# total SC entropy
totalTopogEntropySC = 0.
# define a list of ConformationEntities where each element corresponds to a residue
conformationEntityList = []
# browse through each residue in the system and get their dihedrals
for resindices in allSel.residues.resindices:
Utils.printflush('-'*10,end='')
Utils.printflush('Working on resid : {} ({})'.format(arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]), end='')
Utils.printflush('-'*10)
resid = arg_hostDataContainer.universe.residues.resids[resindices]
# build a binary tree that will hold unique dihedrals
# uniqueness is defined by the indices of the middle (2-3) bond atoms
diheds_in_rid = CustomDataTypes.BinaryTree()
iAtom_in_rid = nmp.flip(allSel.select_atoms(f"resid {resid}").atoms.indices)
for idx in iAtom_in_rid:
for iDih in arg_hostDataContainer.dihedralTable[idx]:
# see if it is a side chain dihedral exclusive to this resid
if iDih.is_from_same_residue() == resid and iDih.is_heavy() and not (iDih.is_BB_phi() or iDih.is_BB_psi()):
dihNode = CustomDataTypes.TreeNode(None, None, iDih)
diheds_in_rid.add_node(dihNode)
Utils.printflush('Found {} exclusive dihedrals in residue {}{}'.\
format(len(diheds_in_rid), arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]))
# create an object of Class ConformationEntity corresponding to this residue
newEntity = CONF.ConformationEntity(arg_order = len(diheds_in_rid), arg_numFrames = numFrames)
# also initialize a string array that will store the state in each frame as a distinct string
# made by coalescing a character cast of the numeric state array
ridDecimalReprArray = []
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value of each of the dihedrals for this residue at that frame
for i, iDih in enumerate(diheds_in_rid.list_in_order()):
phi = iDih.get_dihedral_angle_lab(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
v = bytearray([isGaucheP, isGaucheN, isTrans])
newEntity.timeSeries[i,iFrame] = Utils.binary_to_dec_repr(v)
# populate the ridDecimalReprArray appropriately
ridDecimalReprArray.append(Utils.coalesce_numeric_array(newEntity.timeSeries[:,iFrame]))
# for each unique state, get its count and compute the topographical entropy for this residue
setOfstates = set(ridDecimalReprArray)
Utils.printflush('Found {} dihedrals which collectively acquire {} unique conformers'.format(len(diheds_in_rid), len(setOfstates)))
# print(ridDecimalReprArray)
# total SC entropy at the topographical level of this residue
ridTopogEntropy = 0.
for iState in setOfstates:
iCount = ridDecimalReprArray.count(iState)
# p Log(p) for this state
iPlogP = iCount * (nmp.log(iCount) - logNumFrames)
ridTopogEntropy += iPlogP
ridTopogEntropy /= numFrames
ridTopogEntropy *= -CONST.GAS_CONST #(R)
# Final residue SC information
Utils.printflush('{:<40s} : {:.4f}'.format('Side Chain Topographical Entropy from corr. pLogP method ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
if arg_outFile != None:
Utils.printOut(arg_outFile, '{:<40s} : {:.4f}'.format('Side Chain Topographical Entropy from corr. pLogP method ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
# add this residue's SC entropy to the total SC entropy
totalTopogEntropySC += ridTopogEntropy
# total SC topographical entropy
Utils.hbar(60)
Utils.printflush('{:<40} : {:>15.3f}'.format('Total SC Topog. Entropy (corr. pLogP) ', totalTopogEntropySC))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '_'*60)
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total SC Topog. Entropy (corr. pLogP)', totalTopogEntropySC))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropySC
#END
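# The coalescing step above joins the per-dihedral integer state labels of one
# frame into a single string key and counts unique keys to enumerate conformers.
# A hedged, plain-numpy sketch of that idea (illustrative name; not the actual
# Utils.coalesce_numeric_array implementation):
def _count_conformers_sketch(timeSeries):
    """timeSeries: (numDihedrals, numFrames) integer array -> Counter of conformer keys."""
    from collections import Counter
    numFrames = timeSeries.shape[1]
    keys = ["".join(str(int(s)) for s in timeSeries[:, f]) for f in range(numFrames)]
    return Counter(keys)
# e.g. two dihedrals over four frames:
# _count_conformers_sketch(nmp.array([[1, 1, 2, 1], [0, 0, 0, 1]]))
# -> Counter({'10': 2, '20': 1, '11': 1}); the counts then feed the pLogP sum.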
def compute_topographical_entropy1_BB(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
"""
A function that computes the entropy over the states acquired
collectively by the heavy BB dihedrals in a protein
by also accounting for their correlated motions.
A protein's collection of BB dihedrals is depicted as
a vector of length N_d, where N_d is the number of BB dihedrals.
Each dihedral's state is represented using 0/1 flags telling which state it was in.
At each time frame, the state of a dihedral is computed and
represented using the decimal equivalent of its bytearray form.
For the entire protein, each time frame has a tuple of integers
corresponding to it which describes it uniquely. All the different
states acquired are then used to compute the entropy using p-logP.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Total Backbone Topog. Entropy
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy of BB dihedrals \ncomputed using the correlated-pLogp formalism"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy of BB dihedrals \ncomputed using the correlated-pLogp formalism"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# log of number of frames (a constant)
logNumFrames = nmp.log(numFrames)
# conformation vector order |g+, g-, t>
vecOrder = 3
# total BB entropy
totalTopogEntropyBB = 0.
# fetch all the heavy BB dihedrals
bbDiheds = CustomDataTypes.BinaryTree()
for iDih in arg_hostDataContainer.dihedralArray:
# see if it is a peptide bond dihedral
if iDih.is_heavy() and iDih.is_BB_dihedral():
dihNode = CustomDataTypes.TreeNode(None, None, iDih)
bbDiheds.add_node(dihNode)
# create an instance of Class ConformationEntity that will contain all of these BB diheds
newEntity = CONF.ConformationEntity(arg_order = len(bbDiheds), arg_numFrames = numFrames)
# also initialize a string array that will store the state in each frame as a distinct string
# made by coalescing a character cast of the numeric state array
bbDecimalReprArray = []
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value of each of the BB dihedrals in the protein at that frame
for i, iDih in enumerate(bbDiheds.list_in_order()):
phi = iDih.get_dihedral_angle_lab(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
v = bytearray([isGaucheP, isGaucheN, isTrans])
newEntity.timeSeries[i,iFrame] = Utils.binary_to_dec_repr(v)
# populate the bbDecimalReprArray appropriately
bbDecimalReprArray.append(Utils.coalesce_numeric_array(newEntity.timeSeries[:,iFrame]))
# for each unique state, get its count and compute the BB topographical entropy
setOfstates = set(bbDecimalReprArray)
Utils.printflush('Found {} dihedrals which collectively acquire {} unique conformers'.format(len(bbDiheds), len(setOfstates)))
# total BB entropy at the topographical level
totalTopogEntropyBB = 0.
for iState in setOfstates:
iCount = bbDecimalReprArray.count(iState)
# p Log(p) for this state
iPlogP = iCount * (nmp.log(iCount) - logNumFrames)
totalTopogEntropyBB += iPlogP
totalTopogEntropyBB /= numFrames
totalTopogEntropyBB *= -CONST.GAS_CONST #(R)
# total BB topographical entropy
Utils.hbar(60)
Utils.printflush('{:<40} : {:>15.3f}'.format('Total BB Topog. Entropy (corr. pLogP) ', totalTopogEntropyBB))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '_'*60)
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total BB Topog. Entropy (corr. pLogP) ', totalTopogEntropyBB))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropyBB
#END
def compute_topographical_entropy_method4(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
"""
!!! Work in progress
Function that computes the topographical entropy using Method 4 (Phi coefficient),
a.k.a. the dihedral-state-contingency method.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Topog. Entropy (Method4)
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy using dihedral-state-contingency method"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy using dihedral-state-contingency method"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# conformation vector order |g+, g-, t>
vecOrder = 3 # (= Q)
# initialize total entropy from all residues
totalTopogEntropy4 = 0
# all the dihedrals will be computed using coordinates projected onto
# molecular principal-axes frames. (It should however not matter which
# axes system we choose, because dihedrals are measured from vector differences,
# which do not depend on the choice of coordinate system.)
CF.cast_translationAxesArray_at_molecule_level(arg_dataContainer=arg_hostDataContainer)
# update local coordinates
if arg_verbose >= 2:
Utils.printflush("Updating Local coordinates based on new Principal Axes ... ",end= ' ')
arg_hostDataContainer.update_localCoords_of_all_atoms(arg_type="T")
if arg_verbose >= 2:
Utils.printflush('Done')
#
#
# Residue wise calculation of topographical entropy
#
#
for resindices in allSel.residues.resindices:
Utils.printflush('-'*10,end='')
Utils.printflush('Working on resid : {} ({})'.format(arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]), end='')
Utils.printflush('-'*10)
resid = arg_hostDataContainer.universe.residues.resids[resindices]
dihedsInRid = set()
iAtom_in_rid = nmp.flip(allSel.select_atoms(f"resid {resid}").atoms.indices)
for idx in iAtom_in_rid:
for iDih in arg_hostDataContainer.dihedralTable[idx]:
# see if it is exclusive to this resid because they could also be peptide bond diheds
if iDih.is_from_same_residue() == resid and iDih.is_heavy():
dihedsInRid.add(iDih)
numDiheds = len(dihedsInRid)
if arg_verbose >= 2:
Utils.printflush('Found {} exclusive dihedrals in residue {}\
'.format(numDiheds, arg_hostDataContainer.universe.residues.resnames[resindices]))
# treat each dihedral as a conformation entity
# initialize a list of ConformationEntities for this molecule
conformationEntityList = []
# for each heavy dihedral
for iDih in dihedsInRid:
# make an entity from this dihedral
newEntity = CONF.ConformationEntity(arg_order = vecOrder, arg_numFrames = numFrames)
# generate a time series of the conformations it acquires.
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value at that frame
phi = iDih.get_dihedral_angle_local(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
# place it in the time series block appropriately
newEntity.timeSeries[:,iFrame] = nmp.asarray([isGaucheP, isGaucheN, isTrans], dtype = nmp.int8)
# add this dihedral into the list of conformation entities
conformationEntityList.append(newEntity)
#-------------------------------------------------------------------------------------
#
# initialize and populate the symmetric occupancy matrix (for the residue)
#
#-------------------------------------------------------------------------------------
# initialize
occuMatrix = -1000 * nmp.ones((numDiheds*vecOrder, numDiheds*vecOrder))
if arg_outFile != None:
Utils.printOut(arg_outFile, "Occupancy matrix for Residue {}".format(arg_hostDataContainer.universe.residues.resnames[resindices]))
# populate
for i in range(0,numDiheds):
iDih = conformationEntityList[i]
if arg_verbose >= 2:
Utils.printflush('Dihedral {} : |'.format(i), end = ' ' )
for j in range(i, numDiheds):
jDih = conformationEntityList[j]
if arg_verbose >= 2:
Utils.printflush('.',end='')
for iState in range(vecOrder):
idx = (vecOrder * i) + iState
iDihTimeSeries = iDih.timeSeries[iState,:]
for jState in range(vecOrder):
jdx = (vecOrder * j) + jState
jDihTimeSeries = jDih.timeSeries[jState,:]
# get the determinant of the contingency matrix computed from
# the dihedral states for this pair of dihedrals
ijElement = CF.phi_coeff(arg_v1 = iDihTimeSeries\
, arg_v2 = jDihTimeSeries)
# add entry at position idx, jdx
occuMatrix[idx, jdx] = (ijElement)
# add the same entry at the transpose position because the matrix is symmetric
occuMatrix[jdx, idx] = occuMatrix[idx, jdx]
if arg_verbose >= 2:
Utils.printflush('|')
# diagonalize the occupancy matrix
lambdasPhi, eigVectorsPhi = Utils.diagonalize(occuMatrix)
# normalize the eigenvalues by the number of states and take the absolute value
lambdasPhi = nmp.abs(nmp.divide(lambdasPhi, vecOrder))
# is the occupancy matrix symmetric-positive definite? (are all the eigen values positive?)
for iLm, lm in enumerate(lambdasPhi):
if arg_outFile != None:
Utils.printOut(arg_outFile, "Eigen value {} = {}".format(iLm, lm))
# compute residue topog. entropy from the eigen values using the `lm.log(lm)` formalism
ridTopogEntropy4 = 0
for lm in filter(lambda x: x != 0, lambdasPhi):
ridTopogEntropy4 += (lm * nmp.log(lm) )
ridTopogEntropy4 *= -CONST.GAS_CONST #(R)
# Final residue entropy information
Utils.printflush('{:<40s} : {:.4f}'.format('Topog. Entropy using method4 ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy4))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '{:<40s} : {:.4f}'.format('Topog. Entropy using method4 ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy4))
Utils.printOut(arg_outFile, '-'*60)
# add this residue's topog. entropy to the total topog. entropy
totalTopogEntropy4 += ridTopogEntropy4
# print out the outputs
if arg_verbose >= 0:
Utils.printflush('{:<40} : {:>15.3f}'.format('Topog. Entropy (Method4) ', totalTopogEntropy4))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Topog. Entropy (Method4) ', totalTopogEntropy4))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropy4
#END
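# The occupancy matrix above is filled with CF.phi_coeff values for pairs of
# binary state time series. As a hedged stand-in, the textbook phi (mean-square
# contingency) coefficient looks like this; it is not necessarily the exact
# implementation behind CF.phi_coeff:
def _phi_coeff_sketch(v1, v2):
    """Phi coefficient of two 0/1 time series from their 2x2 contingency table."""
    import numpy as np
    v1 = np.asarray(v1); v2 = np.asarray(v2)
    n11 = int(np.sum((v1 == 1) & (v2 == 1)))
    n10 = int(np.sum((v1 == 1) & (v2 == 0)))
    n01 = int(np.sum((v1 == 0) & (v2 == 1)))
    n00 = int(np.sum((v1 == 0) & (v2 == 0)))
    denom = float(np.sqrt((n11 + n10) * (n01 + n00) * (n11 + n01) * (n10 + n00)))
    return 0.0 if denom == 0 else (n11 * n00 - n10 * n01) / denom
# identical series (with both states present) give +1, complementary ones give -1,
# and independent ones fall near 0.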
def compute_topographical_entropy_AEM(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
"""
Compute entropy by Adaptive Enumeration Method (AEM).
This method deals with each dihedral in a conformational entity on an individual basis. It then coalesces
the state vectors of the dihedrals in the conformational entity to compute the entropy using the p-logP formulation.
This function computes the total entropy over all residues in the base molecule.
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
float: Topog. Entropy (AEM)
"""
Utils.hbar(60)
Utils.printflush("{:^60}".format("Topographical entropy of residue side chains \ncomputed using all the dihedrals with AEM method"))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile,'-'*60)
Utils.printOut(arg_outFile,"{:^60}".format("Topographical entropy of residue side chains \ncomputed using all the dihedrals with AEM method"))
Utils.printOut(arg_outFile,'-'*60)
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# log of number of frames (a constant)
logNumFrames = nmp.log(numFrames)
# total SC entropy
totalTopogEntropySC = 0.
# browse through each residue in the system and get their dihedrals
for resindices in allSel.residues.resindices:
Utils.printflush('-'*10,end='')
Utils.printflush('Working on resid : {} ({})'.format(arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]), end='')
Utils.printflush('-'*10)
resid = arg_hostDataContainer.universe.residues.resids[resindices]
# build a binary tree that will hold unique dihedrals
# uniqueness is defined by the indices of the middle (2-3) bond atoms
diheds_in_rid = CustomDataTypes.BinaryTree()
iAtom_in_rid = nmp.flip(allSel.select_atoms(f"resid {resid}").atoms.indices)
for idx in iAtom_in_rid:
for iDih in arg_hostDataContainer.dihedralTable[idx]:
# see if it is a side chain dihedral exclusive to this resid
if iDih.is_from_same_residue() == resid and iDih.is_heavy():
dihNode = CustomDataTypes.TreeNode(None, None, iDih)
diheds_in_rid.add_node(dihNode)
Utils.printflush('Found {} exclusive dihedrals in residue {}{}'.\
format(len(diheds_in_rid), arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]))
# create an object of Class ConformationEntity corresponding to this residue
newEntity = CONF.ConformationEntity(arg_order = len(diheds_in_rid), arg_numFrames = numFrames)
# also initialize a string array that will store the state in each frame as a distinct string
# made by coalescing a character cast of the numeric state array
ridDecimalReprArray = []
# for each dihedral identified, get the state vector
for i, iDih in enumerate(diheds_in_rid.list_in_order()):
stateTS = iDih.get_state_ts(arg_verbose = arg_verbose)
newEntity.timeSeries[i,:] = stateTS
# Now coalesce integer labels of the constituent dihedrals in each time point to get
# an expression of the conformation at that time.
for iFrame in range(numFrames):
ridDecimalReprArray.append(Utils.coalesce_numeric_array(newEntity.timeSeries[:,iFrame]))
# for each unique state, get its count and compute the topographical entropy for this residue
setOfstates = set(ridDecimalReprArray)
Utils.printflush('Found {} dihedrals which collectively acquire {} unique conformers'.format(len(diheds_in_rid), len(setOfstates)))
# print(ridDecimalReprArray)
# total SC entropy at the topographical level of this residue
ridTopogEntropy = 0.
for iState in setOfstates:
iCount = ridDecimalReprArray.count(iState)
# p Log(p) for this state
iPlogP = iCount * (nmp.log(iCount) - logNumFrames)
ridTopogEntropy += iPlogP
ridTopogEntropy /= numFrames
ridTopogEntropy *= -CONST.GAS_CONST #(R)
# Final residue SC information
Utils.printflush('{:<40s} : {:.4f}'.format('Residue Topographical Entropy from AEM ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
if arg_outFile != None:
Utils.printOut(arg_outFile, '{:<40s} : {:.4f}'.format('Residue Topographical Entropy from AEM ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))
# add this residue's SC entropy to the total SC entropy
totalTopogEntropySC += ridTopogEntropy
# total SC topographical entropy
Utils.hbar(60)
Utils.printflush('{:<40} : {:>15.3f}'.format('Total Topog. Entropy (AEM) ', totalTopogEntropySC))
Utils.hbar(60)
if arg_outFile != None:
Utils.printOut(arg_outFile, '_'*60)
Utils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total Topog. Entropy (AEM)', totalTopogEntropySC))
Utils.printOut(arg_outFile, '-'*60)
return totalTopogEntropySC
#END
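# A hedged usage sketch for the AEM route (`dataContainer` is an illustrative
# assumption, as in the sketches above):
#
#   S_aem = compute_topographical_entropy_AEM(
#       arg_hostDataContainer = dataContainer,
#       arg_selector = "protein",
#       arg_outFile = "topog_aem.out")
#
# AEM differs from compute_topographical_entropy1_SC mainly in that each
# dihedral's state labels come from iDih.get_state_ts() rather than from the
# fixed three-state binning.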
def compute_topographical_entropy_method3(arg_hostDataContainer, arg_selector="all", arg_outFile=None, arg_verbose=3):
"""
Function that computes the topographical entropy using Method 3 (correlation density function).
Args:
arg_hostDataContainer (CodeEntropy.ClassCollection.DataContainer): Data Container for CodeEntropy
arg_selector (str, optional): Selection string for MDAnalysis.Universe.select_atoms. Defaults to "all".
arg_outFile (str): path to an output file; output is written in append mode
arg_verbose (int, optional): verbosity level from 1-5. Defaults to 3.
Returns:
numpy.ndarray: eigenvalues of the correlation-weighted density matrix
"""
allSel = arg_hostDataContainer.universe.select_atoms(arg_selector)
# number of frames
numFrames = len(arg_hostDataContainer.trajSnapshots)
# conformation vector order |g+, g-, t>
vecOrder = 3 # (= Q)
# treat each dihedral as a conformation entity
# initialize a list of ConformationEntities for this molecule
conformationEntityList = []
# fetch all the heavy dihedrals
nohDiheds = list(filter(lambda dih: dih.is_heavy(), arg_hostDataContainer.dihedralArray))
# for iDih in arg_baseMolecule.dihedralArray:
for iDih in nohDiheds:
dihAtoms = {"atom1": iDih.atom1,
"atom2": iDih.atom2,
"atom3": iDih.atom3,
"atom4": iDih.atom4,
"isBB" : iDih.is_BB_dihedral(),
"isHeavy" : iDih.is_heavy(),
"isSameRes" : iDih.is_from_same_residue()}
# make an entity from this dihedral
newEntity = CONF.ConformationEntity(arg_order = vecOrder, arg_numFrames = numFrames, **dihAtoms)
# generate a time series of the conformations it acquires.
# at each frame
for iFrame in range(numFrames):
# fetch the dihedral value at that frame
phi = iDih.get_dihedral_angle_lab(arg_frame = iFrame)
# define its status
# isGaucheP = ( 0 <= phi < 120)
# isGaucheN = ( 0 > phi >= -120 )
# isTrans = ( phi >= 120 or phi < -120)
# using a different categorisation because some dihedrals
# hover around the zero-line, which makes them look incorrectly flexible
# (e.g. planar dihedrals of aromatic rings)
isGaucheP = ( -30 <= phi < 90)
isGaucheN = ( -30 > phi >= -150 )
isTrans = ( phi >= 90 or phi < -150)
# place it in the time series block appropriately
newEntity.timeSeries[:,iFrame] = nmp.asarray([isGaucheP, isGaucheN, isTrans], dtype = nmp.int8)
# add this dihedral into the list of conformation entities
conformationEntityList.append(newEntity)
# total number of conformational entities (or dihedrals)
numDiheds = len(conformationEntityList)
# for each pair of dihedrals, find a matrix rho_ij = p_ij * r_ij for i,j = 1 .. Q
# where p_ij is the probability of seeing dihedral 1 in state 'i' and dihedral 2 in state 'j'
# and r_ij is the correlation of dihedral 1 in state 'i' and dihedral 2 in state 'j'
# initialize the density matrix with values that can never occur
densityMatrix = -1000 * nmp.ones((numDiheds*vecOrder, numDiheds*vecOrder))
for i in range(0,numDiheds):
iEntity = conformationEntityList[i]
Utils.printflush('Dihedral {} : |'.format(i), end = ' ' )
for j in range(i, numDiheds):
jEntity = conformationEntityList[j]
if arg_outFile != None:
Utils.printflush('.',end='')
Utils.printOut(arg_outFile, 'Dihedral {}: ({} {} {} {}) and Dihedral {}: ({} {} {} {})'.format(i, iEntity.atom1, iEntity.atom2, iEntity.atom3, iEntity.atom4, \
j, jEntity.atom1, jEntity.atom2, jEntity.atom3, jEntity.atom4))
for iState in range(vecOrder):
idx = (vecOrder * i) + iState
iDihedralTimeSeries = iEntity.timeSeries[iState,:]
iDihedralTimeSeriesSTD = nmp.std(iDihedralTimeSeries)
for jState in range(vecOrder):
jdx = (vecOrder * j) + jState
jDihedralTimeSeries = jEntity.timeSeries[jState,:]
jDihedralTimeSeriesSTD = nmp.std(jDihedralTimeSeries)
# correlation (r_ij)
ijCorrelation = -1000 #initialize with a number that can never be!
if iDihedralTimeSeriesSTD == 0:
if jDihedralTimeSeriesSTD == 0:
#both are not changing => correlation is '1'
ijCorrelation = 1
else:
#one is changing irrespective of the other => no correlation
ijCorrelation = 0
else:
if jDihedralTimeSeriesSTD == 0:
#one is changing irrespective of the other => no correlation
ijCorrelation = 0
else:
#compute the correlation using covariance
ijCovariance = CF.covariance(iDihedralTimeSeries, jDihedralTimeSeries)
ijCorrelation = ijCovariance/(iDihedralTimeSeriesSTD * jDihedralTimeSeriesSTD)
# probability of coexistence (p_ij)
ijProb = CF.probability_of_coexistence(iDihedralTimeSeries, jDihedralTimeSeries)
# add entry at position idx, jdx
densityMatrix[idx, jdx] = ijProb * ijCorrelation
# add the same entry at the transpose position because the matrix is symmetric
densityMatrix[jdx, idx] = densityMatrix[idx, jdx]
if arg_outFile != None:
Utils.printOut(arg_outFile, "{:>15.8f}".format(densityMatrix[idx, jdx]), end = "")
if jState == (vecOrder - 1):
if arg_outFile != None:
Utils.printOut(arg_outFile,'')
Utils.printflush('|')
# filter rows and columns with all zero (which make the matrix singular)
densityMatrix = CF.filter_zero_rows_columns(densityMatrix)
# diagonalize the density matrix
lambdasRho, eigVectorsRho = Utils.diagonalize(densityMatrix)
# is the density matrix symmetric-positive definite?
for lr in lambdasRho:
if arg_outFile != None:
Utils.printOut(arg_outFile, lr)
# plot the matrix with imshow
if False:
mplot = plt.figure()
ax = mplot.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
plt.imshow(densityMatrix, cmap = "jet", vmin = -1, vmax = +1)
plt.show()
return lambdasRho
#END
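# Each density-matrix entry above is rho_ij = p_ij * r_ij for a pair of binary
# state time series. A hedged, plain-numpy sketch of one entry, emulating the
# covariance/probability logic above (CF.covariance and
# CF.probability_of_coexistence are assumed to behave like the population
# formulas used here):
def _density_entry_sketch(si, sj):
    """Return p_ij * r_ij for two 0/1 state time series of equal length."""
    import numpy as np
    si = np.asarray(si, dtype=float); sj = np.asarray(sj, dtype=float)
    sdi, sdj = np.std(si), np.std(sj)
    if sdi == 0 and sdj == 0:
        r = 1.0  # neither series changes -> treated as fully correlated above
    elif sdi == 0 or sdj == 0:
        r = 0.0  # one changes irrespective of the other -> no correlation
    else:
        r = np.mean((si - si.mean()) * (sj - sj.mean())) / (sdi * sdj)
    p = float(np.mean((si == 1) & (sj == 1)))  # probability of coexistence
    return p * r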
angel/__init__.py | GiulioRossetti/ANGEL @ 63d29e90756f7c656c56170ca247a65c9667d416 | BSD-2-Clause | 3 stars (2020-06-16 to 2021-12-28)
from angel.alg.iAngel import Angel
from angel.alg.iArchAngel import ArchAngel
nexussdk/storages.py | hygt/nexus-python-sdk @ 3a625bdd218e164fbc6ad8d936357cd6ede19a98 | Apache-2.0
"""
This module provides a Python interface for operations on Storages.
It is part of the Knowledge Graph API of Blue Brain Nexus v1.
https://bluebrainnexus.io/docs/api/1.1/kg/kg-storages-api.html
"""
from typing import Dict, Optional
from urllib.parse import quote_plus as url_encode
from nexussdk.utils.http import http_get
from nexussdk.utils.http import http_put
from nexussdk.utils.http import http_post
from nexussdk.utils.http import http_delete
from nexussdk.utils.tools import listing_params
SEGMENT = "storages"
def create_(org_label: str, project_label: str, payload: Dict, storage_id: Optional[str]) -> Dict:
"""Create storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param payload: Payload of the storage
:param storage_id: (optional) User-defined ID of the storage, given as an IRI which is not URL encoded.
:return: The Nexus metadata of the created storage.
"""
if storage_id is not None:
payload["@id"] = storage_id
return http_post([SEGMENT, org_label, project_label], body=payload)
def update_(org_label: str, project_label: str, payload: Dict, storage_id: str, rev: int) -> Dict:
"""Update storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param payload: Payload of the storage
:param storage_id: User-defined ID of the storage, given as an IRI which is not URL encoded.
:param rev: last known revision of the storage
:return: The Nexus metadata of the updated storage.
"""
return http_put([SEGMENT, org_label, project_label, url_encode(storage_id)], body=payload, rev=rev)
def create_disk_storage(org_label: str, project_label: str, volume: str,
storage_id: Optional[str] = None, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False) -> Dict:
"""Create disk storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param volume: the volume on the local file system where the files are going to be stored
:param storage_id: (optional) User-defined ID of the storage, given as an IRI which is not URL encoded.
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:return: The Nexus metadata of the created storage.
"""
payload = {
"@type": "nxv:DiskStorage",
"volume": volume,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
return create_(org_label, project_label, payload, storage_id)
def create_s3_storage(org_label: str, project_label: str,
bucket: str,
storage_id: Optional[str] = None, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False, endpoint: Optional[str] = None,
region: Optional[str] = None,
access_key: Optional[str] = None,
secret_key: Optional[str] = None) -> Dict:
"""Create S3 storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param bucket: the S3 bucket where the files are going to be stored
:param storage_id: (optional) User-defined ID of the storage, given as an IRI which is not URL encoded.
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:param endpoint: (optional) S3 endpoint, either the domain or a full URL
:param region: (optional) S3 region
:param access_key: (optional) S3 access key
:param secret_key: (optional) S3 secret key
:return: The Nexus metadata of the created storage.
"""
payload = {
"@type": "nxv:S3Storage",
"bucket": bucket,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
if endpoint is not None:
payload["endpoint"] = endpoint
if region is not None:
payload["region"] = region
if access_key is not None:
payload["accessKey"] = access_key
if secret_key is not None:
payload["secretKey"] = secret_key
return create_(org_label, project_label, payload, storage_id)
def create_external_disk_storage(org_label: str, project_label: str, endpoint: str, folder: str,
storage_id: Optional[str] = None, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False,
credentials: Optional[str] = None) -> Dict:
"""Create external disk storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param endpoint: endpoint to communicate with the external storage
:param folder: external storage folder (similar in concept to an S3 bucket)
:param storage_id: (optional) User-defined ID of the storage, given as an IRI which is not URL encoded.
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:param credentials: (optional) Bearer token for the external storage
:return: The Nexus metadata of the created storage.
"""
payload = {
"@type": "nxv:ExternalDiskStorage",
"endpoint": endpoint,
"folder": folder,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
if credentials is not None:
payload["credentials"] = credentials
return create_(org_label, project_label, payload, storage_id)
def update_disk_storage(org_label: str, project_label: str, volume: str,
storage_id: str, rev: int, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False) -> Dict:
"""Update disk storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param volume: the volume on the local file system where the files are going to be stored
:param storage_id: the storage ID
:param rev: last known revision of the storage
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:return: The Nexus metadata of the updated storage.
"""
payload = {
"@id": storage_id,
"@type": "nxv:DiskStorage",
"volume": volume,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
return update_(org_label, project_label, payload, storage_id, rev)
def update_s3_storage(org_label: str, project_label: str,
bucket: str,
storage_id: str, rev: int, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False, endpoint: Optional[str] = None,
region: Optional[str] = None,
access_key: Optional[str] = None,
secret_key: Optional[str] = None) -> Dict:
"""Update S3 storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param bucket: the S3 bucket where the files are going to be stored
:param storage_id: the storage ID
:param rev: last known revision of the storage
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:param endpoint: (optional) S3 endpoint, either the domain or a full URL
:param region: (optional) S3 region
:param access_key: (optional) S3 access key
:param secret_key: (optional) S3 secret key
:return: The Nexus metadata of the updated storage.
"""
payload = {
"@id": storage_id,
"@type": "nxv:S3Storage",
"bucket": bucket,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
if endpoint is not None:
payload["endpoint"] = endpoint
if region is not None:
payload["region"] = region
if access_key is not None:
payload["accessKey"] = access_key
if secret_key is not None:
payload["secretKey"] = secret_key
return update_(org_label, project_label, payload, storage_id, rev)
def update_external_disk_storage(org_label: str, project_label: str, endpoint: str, folder: str,
storage_id: str, rev: int, read_permission: Optional[str] = None,
write_permission: Optional[str] = None, default: bool = False,
credentials: Optional[str] = None) -> Dict:
"""Update external disk storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param endpoint: endpoint to communicate with the external storage
:param folder: external storage folder (similar in concept to an S3 bucket)
:param storage_id: the storage ID
:param rev: last known revision of the storage
:param read_permission: (optional) the permission required in order to download a file from this storage
:param write_permission: (optional) the permission required in order to upload a file to this storage
:param default: (optional) whether the storage should be the default storage for the project, defaults to False
:param credentials: (optional) Bearer token for the external storage
:return: The Nexus metadata of the updated storage.
"""
payload = {
"@type": "nxv:ExternalDiskStorage",
"endpoint": endpoint,
"folder": folder,
"default": default
}
if storage_id is not None:
payload["@id"] = storage_id
if read_permission is not None:
payload["readPermission"] = read_permission
if write_permission is not None:
payload["writePermission"] = write_permission
if credentials is not None:
payload["credentials"] = credentials
return update_(org_label, project_label, payload, storage_id, rev)
def deprecate(org_label: str, project_label: str, storage_id: str, rev: int) -> Dict:
"""Deprecate storage
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param storage_id: the storage ID
:param rev: last known revision of the storage
:return: The Nexus metadata of the storage.
"""
return http_delete([SEGMENT, org_label, project_label, url_encode(storage_id)], rev=rev)
def tag(org_label: str, project_label: str, storage_id: str, tag: str, rev_to_tag: str, rev: int) -> Dict:
"""Tag a storage
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param storage_id: the storage ID
:param tag: tag label
:param rev_to_tag: revision to tag
:param rev: last known revision of the storage
:return: The Nexus metadata of the updated storage.
"""
payload = {
"tag": tag,
"rev": rev_to_tag,
}
return http_post([SEGMENT, org_label, project_label, url_encode(storage_id), "tags"], payload, rev=rev)
def tags(org_label: str, project_label: str, storage_id: str) -> Dict:
"""Fetch tags for storage.
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param storage_id: the storage ID
:return: The tags for the storage.
"""
return http_get([SEGMENT, org_label, project_label, url_encode(storage_id), "tags"])
def fetch(org_label: str, project_label: str, storage_id: str, tag: Optional[str] = None,
rev: Optional[int] = None) -> Dict:
"""Fetch a storage
:param org_label: Label of the organization the storage belongs to.
:param project_label: Label of the project the storage belongs to.
:param storage_id: the storage ID
:param tag: tag to fetch
:param rev: revision to fetch
:return: storage payload
"""
return http_get([SEGMENT, org_label, project_label, url_encode(storage_id)], rev=rev, tag=tag)
def list(org_label: str, project_label: str, pagination_from: Optional[int] = None,
pagination_size: Optional[int] = None, deprecated: Optional[bool] = None, type: Optional[str] = None,
created_by: Optional[str] = None, updated_by: Optional[str] = None, rev: Optional[int] = None) -> Dict:
"""List storages corresponding to some criteria.
:param org_label: Label of the organization to list the storages for.
:param project_label: Label of the project to list the storages for
:param pagination_from: (optional) Pagination index to start from.
Default: ``0``.
:param pagination_size: (optional) Number of results to return per page.
Default: ``20``.
:param deprecated: (optional) Deprecation status of the storages to keep.
:param type: (optional) Type of the storages to keep, given as an IRI.
:param created_by: (optional) Identity ID of the creator of the storages
to keep, given as an IRI.
:param updated_by: (optional) Identity ID of the last identity which has
updated the storages to keep, given as an IRI.
:param rev: (optional) Revision number of the storages to keep.
:return: A Nexus results list with the Nexus metadata of the matching storages.
"""
return http_get([SEGMENT, org_label, project_label],
params=listing_params(pagination_from, pagination_size, deprecated, type, created_by, updated_by,
rev))
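# --- Illustrative usage sketch (not from the source). Assumes a reachable
# Nexus deployment and that the client was configured beforehand, e.g. via
# nexussdk.config.set_environment(...) and set_token(...); the org/project
# names are hypothetical, and the "@id"/"_rev" keys follow Nexus v1 metadata
# conventions.
from nexussdk import storages

meta = storages.create_disk_storage("my-org", "my-project",
                                    volume="/data/nexus", default=True)
storages.tag("my-org", "my-project", meta["@id"],
             tag="v1.0", rev_to_tag=meta["_rev"], rev=meta["_rev"])
listing = storages.list("my-org", "my-project", deprecated=False)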
| 43.268617
| 117
| 0.683631
| 2,186
| 16,269
| 4.972095
| 0.076853
| 0.048027
| 0.024013
| 0.04269
| 0.886466
| 0.87883
| 0.851964
| 0.845616
| 0.826295
| 0.813874
| 0
| 0.001934
| 0.237138
| 16,269
| 375
| 118
| 43.384
| 0.873822
| 0.479808
| 0
| 0.732484
| 0
| 0
| 0.06785
| 0.005834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082803
| false
| 0
| 0.044586
| 0
| 0.210191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
971c37f2ffdd0ab88324429196593d4e40d558d8
| 111
|
py
|
Python
|
relex/predictors/__init__.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 16
|
2020-04-21T19:04:23.000Z
|
2021-08-03T04:30:43.000Z
|
relex/predictors/__init__.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 3
|
2020-07-25T12:29:21.000Z
|
2021-06-11T02:06:58.000Z
|
relex/predictors/__init__.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 2
|
2020-06-25T12:50:57.000Z
|
2020-11-01T10:31:04.000Z
|
from relex.predictors.relation_classification.relation_classifier_predictor import RelationClassifierPredictor
| 55.5
| 110
| 0.936937
| 10
| 111
| 10.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 111
| 1
| 111
| 111
| 0.943925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8af3f1c4bfb132a59d67c0454c13b52ee3cc90b7
| 7,426
|
py
|
Python
|
src/python/tests/integration/test_web/test_handler/test_auto_queue.py
|
annihilatethee/seedsync
|
7a0ba915cc570bc12916088baa6eb6bee6f291c9
|
[
"Apache-2.0"
] | 255
|
2017-12-25T00:53:40.000Z
|
2022-03-27T10:29:21.000Z
|
src/python/tests/integration/test_web/test_handler/test_auto_queue.py
|
annihilatethee/seedsync
|
7a0ba915cc570bc12916088baa6eb6bee6f291c9
|
[
"Apache-2.0"
] | 111
|
2018-01-04T10:35:49.000Z
|
2022-03-29T15:12:52.000Z
|
src/python/tests/integration/test_web/test_handler/test_auto_queue.py
|
annihilatethee/seedsync
|
7a0ba915cc570bc12916088baa6eb6bee6f291c9
|
[
"Apache-2.0"
] | 53
|
2017-12-25T09:34:19.000Z
|
2022-03-15T17:53:27.000Z
|
# Copyright 2017, Inderpreet Singh, All rights reserved.
import json
from urllib.parse import quote
from controller import AutoQueuePattern
from tests.integration.test_web.test_web_app import BaseTestWebApp
class TestAutoQueueHandler(BaseTestWebApp):
def test_get(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="one"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="t wo"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="thr'ee"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fo\"ur"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fi%ve"))
resp = self.test_app.get("/server/autoqueue/get")
self.assertEqual(200, resp.status_int)
json_list = json.loads(str(resp.html))
self.assertEqual(5, len(json_list))
self.assertIn({"pattern": "one"}, json_list)
self.assertIn({"pattern": "t wo"}, json_list)
self.assertIn({"pattern": "thr'ee"}, json_list)
self.assertIn({"pattern": "fo\"ur"}, json_list)
self.assertIn({"pattern": "fi%ve"}, json_list)
def test_get_is_ordered(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="a"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="b"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="c"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="d"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="e"))
resp = self.test_app.get("/server/autoqueue/get")
self.assertEqual(200, resp.status_int)
json_list = json.loads(str(resp.html))
self.assertEqual(5, len(json_list))
self.assertEqual([
{"pattern": "a"},
{"pattern": "b"},
{"pattern": "c"},
{"pattern": "d"},
{"pattern": "e"}
], json_list)
def test_add_good(self):
resp = self.test_app.get("/server/autoqueue/add/one")
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("one"), self.auto_queue_persist.patterns)
uri = quote(quote("/value/with/slashes", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(2, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("/value/with/slashes"), self.auto_queue_persist.patterns)
uri = quote(quote(" value with spaces", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(3, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern(" value with spaces"), self.auto_queue_persist.patterns)
uri = quote(quote("value'with'singlequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(4, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("value'with'singlequote"), self.auto_queue_persist.patterns)
uri = quote(quote("value\"with\"doublequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(5, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("value\"with\"doublequote"), self.auto_queue_persist.patterns)
def test_add_double(self):
resp = self.test_app.get("/server/autoqueue/add/one")
self.assertEqual(200, resp.status_int)
resp = self.test_app.get("/server/autoqueue/add/one", expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern 'one' already exists.", str(resp.html))
def test_add_empty_value(self):
uri = quote(quote(" ", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri, expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
resp = self.test_app.get("/server/autoqueue/add/", expect_errors=True)
self.assertEqual(404, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
def test_remove_good(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern("one"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("/value/with/slashes"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(" value with spaces"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("value'with'singlequote"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("value\"with\"doublequote"))
resp = self.test_app.get("/server/autoqueue/remove/one")
self.assertEqual(200, resp.status_int)
self.assertEqual(4, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("one"), self.auto_queue_persist.patterns)
uri = quote(quote("/value/with/slashes", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(3, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("/value/with/slashes"), self.auto_queue_persist.patterns)
uri = quote(quote(" value with spaces", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(2, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern(" value with spaces"), self.auto_queue_persist.patterns)
uri = quote(quote("value'with'singlequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("value'with'singlequote"), self.auto_queue_persist.patterns)
uri = quote(quote("value\"with\"doublequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("value\"with\"doublequote"), self.auto_queue_persist.patterns)
def test_remove_non_existing(self):
resp = self.test_app.get("/server/autoqueue/remove/one", expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern 'one' doesn't exist.", str(resp.html))
def test_remove_empty_value(self):
uri = quote(quote(" ", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri, expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern ' ' doesn't exist.", str(resp.html))
self.assertEqual(0, len(self.auto_queue_persist.patterns))
resp = self.test_app.get("/server/autoqueue/remove/", expect_errors=True)
self.assertEqual(404, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
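# --- Illustrative note (not from the source): the double quote(quote(...))
# used in these tests URL-encodes the pattern twice because the web framework
# decodes the path segment once during routing; a singly encoded "/" would be
# decoded back into a path separator and split the route.
from urllib.parse import quote
assert quote("/a/b", safe="") == "%2Fa%2Fb"
assert quote(quote("/a/b", safe=""), safe="") == "%252Fa%252Fb"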
| 51.93007
| 104
| 0.684083
| 929
| 7,426
| 5.286329
| 0.097955
| 0.07697
| 0.103238
| 0.158827
| 0.914274
| 0.884138
| 0.875178
| 0.875178
| 0.766035
| 0.670332
| 0
| 0.012518
| 0.171694
| 7,426
| 142
| 105
| 52.295775
| 0.785888
| 0.007272
| 0
| 0.487395
| 0
| 0.008403
| 0.14749
| 0.076662
| 0
| 0
| 0
| 0
| 0.453782
| 1
| 0.067227
| false
| 0
| 0.033613
| 0
| 0.109244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c124295253c904f3e19d2b7a14be8902741e9a96
| 138
|
py
|
Python
|
tests/api/test_factory.py
|
jroquejr/nfe-reader
|
277379bfb9865b2656c2576d8ccf8c3e1f3cacd1
|
[
"MIT"
] | null | null | null |
tests/api/test_factory.py
|
jroquejr/nfe-reader
|
277379bfb9865b2656c2576d8ccf8c3e1f3cacd1
|
[
"MIT"
] | 2
|
2021-04-21T14:57:31.000Z
|
2021-04-21T14:57:32.000Z
|
tests/api/test_factory.py
|
jroquejr/nfe-reader
|
277379bfb9865b2656c2576d8ccf8c3e1f3cacd1
|
[
"MIT"
] | null | null | null |
from api.app import create_app
def test_factory():
assert not create_app().testing
assert create_app({"TESTING": True}).testing
| 19.714286
| 48
| 0.731884
| 20
| 138
| 4.85
| 0.6
| 0.278351
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15942
| 138
| 6
| 49
| 23
| 0.836207
| 0
| 0
| 0
| 0
| 0
| 0.050725
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1ff88fad66967ae1ebe1bc58f80ce4b5daedef1
| 2,809
|
py
|
Python
|
cobra/utils/console_log.py
|
frankenstien-831/cobra
|
a2ec3ed1038c9606ed7e6978b5bf88f08fd2fc7f
|
[
"MIT"
] | 53
|
2019-07-14T07:19:56.000Z
|
2022-03-25T06:56:04.000Z
|
cobra/utils/console_log.py
|
frankenstien-831/cobra
|
a2ec3ed1038c9606ed7e6978b5bf88f08fd2fc7f
|
[
"MIT"
] | 1
|
2019-07-16T17:45:57.000Z
|
2019-07-17T22:16:09.000Z
|
cobra/utils/console_log.py
|
frankenstien-831/cobra
|
a2ec3ed1038c9606ed7e6978b5bf88f08fd2fc7f
|
[
"MIT"
] | 11
|
2019-07-14T09:26:12.000Z
|
2021-12-10T11:23:19.000Z
|
from colorama import Fore, Style
def console_log(text, _type=None, title=None, space=False, space_number=0):
# Checking text instance is string
if isinstance(text, str):
if title is None:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + '[SUCCESS]'
+ Style.RESET_ALL + ' ' + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + '[WARNING]'
+ Style.RESET_ALL + ' ' + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + '[ERROR]'
+ Style.RESET_ALL + ' ' + text)
else:
return print(text)
elif title is not None \
and isinstance(title, str) and not space:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + '[SUCCESS]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + '[WARNING]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + '[ERROR]'
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
else:
return print(Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif title is not None \
and isinstance(title, str) and space:
if _type == 'success':
return print(Style.DIM + Fore.GREEN + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'warning':
return print(Style.DIM + Fore.YELLOW + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
elif _type == 'error':
return print(Style.DIM + Fore.RED + ' '
+ Style.RESET_ALL + ' ' + Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
else:
if space_number == 0:
return print(Fore.WHITE + title
+ ': ' + Style.RESET_ALL + text)
else:
return print(Fore.WHITE + ' ' * space_number + title
+ ': ' + Style.RESET_ALL + text)
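# --- Illustrative usage (not from the source): each call maps onto one of the
# branches above.
console_log("scan finished", _type='success')            # [SUCCESS] scan finished
console_log("bad rule", _type='error', title='parser')   # [ERROR] parser: bad rule
console_log("details", title='parser', space=True, space_number=4)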
| 49.280702
| 75
| 0.420434
| 254
| 2,809
| 4.523622
| 0.153543
| 0.156658
| 0.203655
| 0.177546
| 0.849434
| 0.830287
| 0.830287
| 0.813751
| 0.813751
| 0.788512
| 0
| 0.00134
| 0.468494
| 2,809
| 56
| 76
| 50.160714
| 0.768252
| 0.011392
| 0
| 0.735849
| 0
| 0
| 0.057297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.018868
| 0
| 0.283019
| 0.245283
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e7c33b57e33d10a7f2bd559ec1a3c4c514bb414b
| 153
|
py
|
Python
|
tagger/data/__init__.py
|
XMUNLP/Tagger
|
02e1fd323ac747bfe5f7b8824c6b416fd90f33a1
|
[
"BSD-3-Clause"
] | 335
|
2017-12-08T07:14:32.000Z
|
2022-03-01T15:22:26.000Z
|
tagger/data/__init__.py
|
XMUNLP/Tagger
|
02e1fd323ac747bfe5f7b8824c6b416fd90f33a1
|
[
"BSD-3-Clause"
] | 23
|
2018-03-27T01:59:19.000Z
|
2022-02-15T16:15:57.000Z
|
tagger/data/__init__.py
|
XMUNLP/Tagger
|
02e1fd323ac747bfe5f7b8824c6b416fd90f33a1
|
[
"BSD-3-Clause"
] | 91
|
2017-12-08T07:14:34.000Z
|
2021-12-16T23:19:42.000Z
|
from tagger.data.dataset import get_dataset
from tagger.data.vocab import load_vocabulary, lookup
from tagger.data.embedding import load_glove_embedding
| 38.25
| 54
| 0.869281
| 23
| 153
| 5.608696
| 0.521739
| 0.232558
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084967
| 153
| 3
| 55
| 51
| 0.921429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
99b3ede6b3c6096b7a85dbe8a78e3d96b008603a
| 22,046
|
py
|
Python
|
py_dp/dispersion/dispersion_models_1d.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | 1
|
2019-12-23T14:35:43.000Z
|
2019-12-23T14:35:43.000Z
|
py_dp/dispersion/dispersion_models_1d.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | null | null | null |
py_dp/dispersion/dispersion_models_1d.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | 1
|
2019-12-23T14:34:43.000Z
|
2019-12-23T14:34:43.000Z
|
# Copyright 2017 Amir Hossein Delgoshaie, amirdel@stanford.edu
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee
# is hereby granted, provided that the above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import random as random
import bisect as bs
import numpy as np
from py_dp.dispersion.binning import get_cdf
from py_dp.dispersion.second_order_markov import find_1d_bins
from py_dp.dispersion.dispersion_models import dispersionModelGeneral
# These classes are not used now. Just kept for reference.
class dispModelUncorrStencil(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dt, dx_array, x_max,
inj_location = "start", verbose = True):
super(dispModelUncorrStencil,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.dx_array = dx_array
self.dt = dt
self.table_size = len(dx_array) - 1
self.x_max = x_max
def advance_one_step(self, particle_number, current_index):
x_max = self.x_max
end_of_domain_reached = False
dx_array = self.dx_array
dt = self.dt
table_size = self.table_size
rand_ind = random.randint(0,table_size)
dx = dx_array[rand_ind]
current_t = self.time_array[particle_number, current_index]
current_x = self.x_array[particle_number, current_index]
next_x = current_x + dx
if next_x > x_max:
velocity = dx/dt
distance_to_end = x_max - current_x
dt = distance_to_end/velocity
next_x = x_max
end_of_domain_reached = True
next_index = current_index + 1
self.time_array[particle_number,next_index] = current_t + dt
self.x_array[particle_number,next_index] = next_x
return end_of_domain_reached
def follow_specific_paticle(self,particle_number):
n_steps = self.n_steps
for step in range(n_steps):
# x_array and time_array entries are changed inside advance_one_step
current_index = step
end_flag = self.advance_one_step(particle_number, current_index)
if end_flag:
freeze_idx = current_index + 1
self.freeze_particle(particle_number, freeze_idx)
break
def follow_all_particles(self):
n_particles = self.n_particles
for particle in range(n_particles):
self.follow_specific_paticle(particle)
def freeze_particle(self,particle_number,current_index):
"""
after a particle gets to the end of the domain it would stay there.
this function would copy the value at current_idx to all following values for x and time
"""
self.x_array[particle_number,current_index:] = self.x_array[particle_number,current_index]
self.time_array[particle_number,current_index:] = self.time_array[particle_number,current_index]
#self.freeze_time[particle_number] = self.time_array[particle_number,current_index]
self.last_index_array[particle_number] = current_index
class dispModelCorrelatedStencil(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedStencil,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_velocity = class_velocity
self.dt = dt
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dt = self.dt
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
next_idx = 0
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += dt*v
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelCorrelatedStencilFix(dispModelCorrelatedStencil):
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, length, inj_location = "start", verbose = True):
super(dispModelCorrelatedStencilFix,self).__init__(n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, inj_location, verbose)
self.length = length
def follow_one_particle(self, particle_number):
l = self.length
dt = self.dt
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
# initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
next_idx = 0
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
dx = v*dt
abs_dx = abs(dx)
if abs_dx < l:
length_traveled = 0.0
while abs(length_traveled) <= l - abs_dx and out_put_idx < idx_max:
length_traveled += dx
x += dx
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
else:
x += dt * v
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
class dispModelCorrelatedSpace(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_velocity, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedSpace,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_velocity = class_velocity
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dx = self.dx
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += np.sign(v)*dx
t += dx/abs(v)
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelCorrelatedSpaceKang(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedSpaceKang,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.class_velocity = self.get_class_velocity(class_log_edges)
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def get_class_velocity(self, class_log_edges):
v_log_edges = self.class_log_edges
n_class = len(class_log_edges) - 1
class_velocity = np.zeros(n_class)
for i in range(n_class):
log_value = 0.5*(v_log_edges[i] + v_log_edges[i+1])
class_velocity[i] = np.exp(log_value)
return class_velocity
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dx = self.dx
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
v_class_idx = self.draw_from_init_calss_idx()
class_idx = 2*v_class_idx
v = self.draw_from_class_velocity(v_class_idx)
v_sign = 1.0
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += v_sign*dx
t += dx/v
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v_class_idx = np.floor(next_idx/2)
v_sign = -1.0 + 2.0*((next_idx - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelCorrelatedStencilKang(dispersionModelGeneral):
"""
Class to model plume spreading using a Markov model in time, The velocity is
binned using the binning strategy in Kang 2010
"""
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedStencilKang,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.dt = dt
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def follow_one_particle(self, particle_number):
dt = self.dt
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
v_class_idx = self.draw_from_init_calss_idx()
class_idx = 2*v_class_idx
#v is the abs value of velocity
v = self.draw_from_class_velocity(v_class_idx)
v_sign = 1.0
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += dt*v*v_sign
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v_class_idx = np.floor(next_idx/2)
v_sign = -1.0 + 2.0*((next_idx - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelOrderTwo(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelOrderTwo,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.class_velocity = self.get_class_velocity(class_log_edges)
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.n_class = np.sqrt(trans_matrix.shape[0])
self.blocked_particles = []
def get_class_velocity(self, class_log_edges):
v_log_edges = self.class_log_edges
n_class = len(class_log_edges) - 1
class_velocity = np.zeros(n_class)
for i in range(n_class):
log_value = 0.5*(v_log_edges[i] + v_log_edges[i+1])
class_velocity[i] = np.exp(log_value)
return class_velocity
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def draw_from_init_calss_idx(self):
return bs.bisect_right(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
indptr = self.trans_matrix.indptr
start = indptr[current_class]
end = indptr[current_class+1]
rows = self.trans_matrix.indices[start:end]
values = self.trans_matrix.data[start:end]
if len(values) == 0:
return -12
cdf = get_cdf(values)
return rows[bs.bisect(cdf, random.random())]
def advance_x_t(self, v, v_sign, x, t):
t2 = t + self.dx/v
x2 = x + v_sign*self.dx
return x2, t2
def follow_one_particle(self, particle_number):
n_class = self.n_class
dx = self.dx
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
#class_idx is the index of the 2d class
class_idx = self.draw_from_init_calss_idx()
#i, ip1 are indices of (abs(v), sgn(v)) classes
i, ip1 = find_1d_bins(class_idx, n_class)
v_class_idx = np.floor(i/2)
v_sign = -1.0 + 2.0*((i - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
x,t = self.advance_x_t(v, v_sign, x, t)
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
v_class_idx = np.floor(ip1/2)
v_sign = -1.0 + 2.0*((ip1 - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x,t = self.advance_x_t(v, v_sign, x,t)
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
if next_idx == -12:
self.blocked_particles.append(particle_number)
return
t1, t2 = find_1d_bins(next_idx, n_class)
class_idx = next_idx
i, ip1 = find_1d_bins(class_idx, n_class)
v_class_idx = np.floor(ip1/2)
v_sign = -1.0 + 2.0*((ip1 - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
print "removing blocked particles: ", len(self.blocked_particles)
idx_array = np.array(range(self.n_particles))
blocked = np.array(self.blocked_particles)
idx_diff = np.setdiff1d(idx_array, blocked)
self.x_array = self.x_array[idx_diff]
self.time_array = self.time_array[idx_diff]
self.n_particles -= len(self.blocked_particles)
class dispModelTime3d(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
mapping, init_class_count, inj_location = "start",
verbose = True):
super(dispModelTime3d,self).__init__(n_particles, n_steps, inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.mapping = mapping
self.dt = dt
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.n_class = np.sqrt(trans_matrix.shape[0])
self.blocked_particles = []
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def draw_from_init_calss_idx(self):
return bs.bisect_right(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
indptr = self.trans_matrix.indptr
start = indptr[current_class]
end = indptr[current_class+1]
rows = self.trans_matrix.indices[start:end]
values = self.trans_matrix.data[start:end]
if len(values) == 0:
return -12
cdf = get_cdf(values)
return rows[bs.bisect_left(cdf, random.random())]
def advance_x_t(self, v, v_sign, freq, x, t):
dt = self.dt
dx = v_sign*v*dt
if freq>1:
t2 = np.arange(t, t + freq*dt, dt)
x2 = x + np.arange(1,1+freq)*dx
else:
t2 = t + dt
x2 = x + v_sign*v*dt
return x2, t2
def follow_one_particle(self, particle_number):
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
#class_idx is the index of the 2d class
class_idx = self.draw_from_init_calss_idx()
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
v_1d_class, v_sign, freq = self.mapping.find_absvclass_sgn_freq(class_idx)
v = self.mapping.draw_from_class_velocity(v_1d_class)
x_new, t_new = self.advance_x_t(v, v_sign, freq, x, t)
if freq>1:
end_idx = min(out_put_idx+freq, idx_max)
len_idx = end_idx - out_put_idx
x_array[particle_number, out_put_idx:end_idx] = x_new[:len_idx]
t_array[particle_number, out_put_idx:end_idx] = t_new[:len_idx]
out_put_idx += freq
x = x_new[-1]
t = t_new[-1]
else:
x_array[particle_number, out_put_idx] = x_new
t_array[particle_number, out_put_idx] = t_new
out_put_idx += 1
x = x_new
t = t_new
next_idx = self.choose_next_class(class_idx)
class_idx = next_idx
if next_idx == -12:
self.blocked_particles.append(particle_number)
return
def follow_all_particles(self):
for i in range(self.n_particles):
if not np.mod(i,200):
print('particle number: ', i)
self.follow_one_particle(i)
print "removing blocked particles: ", len(self.blocked_particles)
idx_array = np.array(range(self.n_particles))
blocked = np.array(self.blocked_particles)
idx_diff = np.setdiff1d(idx_array, blocked)
self.x_array = self.x_array[idx_diff]
self.time_array = self.time_array[idx_diff]
self.n_particles -= len(self.blocked_particles)
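# --- Illustrative sketch (not from the source): the sampling pattern used
# throughout these classes. get_cdf is assumed to return a normalized,
# nondecreasing array, so that bisecting it with a uniform draw picks a class
# index with probability proportional to its count.
import bisect
import random
import numpy as np

def get_cdf_sketch(counts):
    c = np.cumsum(np.asarray(counts, dtype=float))
    return c / c[-1]

cdf = get_cdf_sketch([5, 3, 2])            # class probabilities 0.5, 0.3, 0.2
idx = bisect.bisect(cdf, random.random())  # yields 0, 1 or 2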
| 40.750462
| 105
| 0.624966
| 3,092
| 22,046
| 4.10414
| 0.078266
| 0.031521
| 0.033333
| 0.034673
| 0.765485
| 0.746887
| 0.735855
| 0.724507
| 0.705122
| 0.697478
| 0
| 0.01168
| 0.293205
| 22,046
| 540
| 106
| 40.825926
| 0.802721
| 0.061372
| 0
| 0.757642
| 0
| 0
| 0.005557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.0131
| null | null | 0.00655
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99bdaab32a76214593c9b2e020d67f873cf67f59
| 44
|
py
|
Python
|
apollo/__init__.py
|
yezl77/pyapollo
|
810f387ccb879e6a77e32b81308f2ab192caec45
|
[
"Apache-2.0"
] | 5
|
2018-07-31T14:57:00.000Z
|
2020-09-11T13:38:57.000Z
|
apollo/__init__.py
|
yezl77/pyapollo
|
810f387ccb879e6a77e32b81308f2ab192caec45
|
[
"Apache-2.0"
] | null | null | null |
apollo/__init__.py
|
yezl77/pyapollo
|
810f387ccb879e6a77e32b81308f2ab192caec45
|
[
"Apache-2.0"
] | 1
|
2018-07-31T14:57:57.000Z
|
2018-07-31T14:57:57.000Z
|
def start():
print("import successful")
| 22
| 30
| 0.659091
| 5
| 44
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 30
| 22
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
413a72e93c9e1b703a41720eb1ac872d1daa4812
| 56,127
|
py
|
Python
|
sdk/python/pulumi_openstack/objectstorage/container_object.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/objectstorage/container_object.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/objectstorage/container_object.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ContainerObjectArgs', 'ContainerObject']
@pulumi.input_type
class ContainerObjectArgs:
def __init__(__self__, *,
container_name: pulumi.Input[str],
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ContainerObject resource.
:param pulumi.Input[str] container_name: A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
:param pulumi.Input[str] content: A string representing the content of the object. Conflicts with
`source` and `copy_from`.
:param pulumi.Input[str] content_disposition: A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
:param pulumi.Input[str] content_encoding: A string representing the value of the Content-Encoding
metadata.
:param pulumi.Input[str] content_type: A string which sets the MIME type for the object.
:param pulumi.Input[str] copy_from: A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
:param pulumi.Input[int] delete_after: An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
:param pulumi.Input[str] delete_at: A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Mon, Wed, 26 Aug 2015 00:00:00 GMT.
:param pulumi.Input[bool] detect_content_type: If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
:param pulumi.Input[str] name: A unique name for the object.
:param pulumi.Input[str] object_manifest: A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
:param pulumi.Input[str] region: The region in which to create the container. If
omitted, the `region` argument of the provider is used. Changing this
creates a new container.
:param pulumi.Input[str] source: A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
"""
pulumi.set(__self__, "container_name", container_name)
if content is not None:
pulumi.set(__self__, "content", content)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if copy_from is not None:
pulumi.set(__self__, "copy_from", copy_from)
if delete_after is not None:
pulumi.set(__self__, "delete_after", delete_after)
if delete_at is not None:
pulumi.set(__self__, "delete_at", delete_at)
if detect_content_type is not None:
pulumi.set(__self__, "detect_content_type", detect_content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if object_manifest is not None:
pulumi.set(__self__, "object_manifest", object_manifest)
if region is not None:
pulumi.set(__self__, "region", region)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
"""
A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the content of the object. Conflicts with
`source` and `copy_from`.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the value of the Content-Encoding
metadata.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
A string which sets the MIME type for the object.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter(name="copyFrom")
def copy_from(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
"""
return pulumi.get(self, "copy_from")
@copy_from.setter
def copy_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "copy_from", value)
@property
@pulumi.getter(name="deleteAfter")
def delete_after(self) -> Optional[pulumi.Input[int]]:
"""
An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
"""
return pulumi.get(self, "delete_after")
@delete_after.setter
def delete_after(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "delete_after", value)
@property
@pulumi.getter(name="deleteAt")
def delete_at(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
"""
return pulumi.get(self, "delete_at")
@delete_at.setter
def delete_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delete_at", value)
@property
@pulumi.getter(name="detectContentType")
def detect_content_type(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
"""
return pulumi.get(self, "detect_content_type")
@detect_content_type.setter
def detect_content_type(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "detect_content_type", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the object.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="objectManifest")
def object_manifest(self) -> Optional[pulumi.Input[str]]:
"""
A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
"""
return pulumi.get(self, "object_manifest")
@object_manifest.setter
def object_manifest(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_manifest", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@pulumi.input_type
class _ContainerObjectState:
def __init__(__self__, *,
container_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_length: Optional[pulumi.Input[int]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
date: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
trans_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ContainerObject resources.
:param pulumi.Input[str] container_name: A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
:param pulumi.Input[str] content: A string representing the content of the object. Conflicts with
`source` and `copy_from`.
:param pulumi.Input[str] content_disposition: A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
:param pulumi.Input[str] content_encoding: A string representing the value of the Content-Encoding
metadata.
:param pulumi.Input[int] content_length: If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
:param pulumi.Input[str] content_type: A string which sets the MIME type for the object.
:param pulumi.Input[str] copy_from: A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
:param pulumi.Input[str] date: The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
:param pulumi.Input[int] delete_after: An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
:param pulumi.Input[str] delete_at: A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
:param pulumi.Input[bool] detect_content_type: If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
:param pulumi.Input[str] last_modified: The date and time when the object was last modified. The date and time
stamp format is ISO 8601:
CCYY-MM-DDThh:mm:ss±hh:mm
For example, 2015-08-27T09:49:58-05:00.
The ±hh:mm value, if included, is the time zone as an offset from UTC. In the previous
example, the offset value is -05:00.
:param pulumi.Input[str] name: A unique name for the object.
:param pulumi.Input[str] object_manifest: A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
:param pulumi.Input[str] region: The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
:param pulumi.Input[str] source: A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
:param pulumi.Input[str] trans_id: A unique transaction ID for this request. Your service provider might
need this value if you report a problem.
"""
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if content is not None:
pulumi.set(__self__, "content", content)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_length is not None:
pulumi.set(__self__, "content_length", content_length)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if copy_from is not None:
pulumi.set(__self__, "copy_from", copy_from)
if date is not None:
pulumi.set(__self__, "date", date)
if delete_after is not None:
pulumi.set(__self__, "delete_after", delete_after)
if delete_at is not None:
pulumi.set(__self__, "delete_at", delete_at)
if detect_content_type is not None:
pulumi.set(__self__, "detect_content_type", detect_content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if last_modified is not None:
pulumi.set(__self__, "last_modified", last_modified)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if object_manifest is not None:
pulumi.set(__self__, "object_manifest", object_manifest)
if region is not None:
pulumi.set(__self__, "region", region)
if source is not None:
pulumi.set(__self__, "source", source)
if trans_id is not None:
pulumi.set(__self__, "trans_id", trans_id)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the content of the object. Conflicts with
`source` and `copy_from`.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the value of the Content-Encoding
metadata.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentLength")
def content_length(self) -> Optional[pulumi.Input[int]]:
"""
If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
"""
return pulumi.get(self, "content_length")
@content_length.setter
def content_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "content_length", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
A string which sets the MIME type for the object.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter(name="copyFrom")
def copy_from(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
"""
return pulumi.get(self, "copy_from")
@copy_from.setter
def copy_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "copy_from", value)
@property
@pulumi.getter
def date(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
"""
return pulumi.get(self, "date")
@date.setter
def date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date", value)
@property
@pulumi.getter(name="deleteAfter")
def delete_after(self) -> Optional[pulumi.Input[int]]:
"""
An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
"""
return pulumi.get(self, "delete_after")
@delete_after.setter
def delete_after(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "delete_after", value)
@property
@pulumi.getter(name="deleteAt")
def delete_at(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
"""
return pulumi.get(self, "delete_at")
@delete_at.setter
def delete_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delete_at", value)
@property
@pulumi.getter(name="detectContentType")
def detect_content_type(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
"""
return pulumi.get(self, "detect_content_type")
@detect_content_type.setter
def detect_content_type(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "detect_content_type", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> Optional[pulumi.Input[str]]:
"""
The date and time when the object was last modified. The date and time
stamp format is ISO 8601:
CCYY-MM-DDThh:mm:ss±hh:mm
For example, 2015-08-27T09:49:58-05:00.
The ±hh:mm value, if included, is the time zone as an offset from UTC. In the previous
example, the offset value is -05:00.
"""
return pulumi.get(self, "last_modified")
@last_modified.setter
def last_modified(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_modified", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the object.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="objectManifest")
def object_manifest(self) -> Optional[pulumi.Input[str]]:
"""
A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
"""
return pulumi.get(self, "object_manifest")
@object_manifest.setter
def object_manifest(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_manifest", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="transId")
def trans_id(self) -> Optional[pulumi.Input[str]]:
"""
A unique transaction ID for this request. Your service provider might
need this value if you report a problem.
"""
return pulumi.get(self, "trans_id")
@trans_id.setter
def trans_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "trans_id", value)
class ContainerObject(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a V1 container object resource within OpenStack.
## Example Usage
### Example with simple content
```python
import pulumi
import pulumi_openstack as openstack
container1 = openstack.objectstorage.Container("container1",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
doc1 = openstack.objectstorage.ContainerObject("doc1",
container_name=container1.name,
content=\"\"\" {
"foo" : "bar"
}
\"\"\",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
```
### Example with content from file
```python
import pulumi
import pulumi_openstack as openstack
container1 = openstack.objectstorage.Container("container1",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
doc1 = openstack.objectstorage.ContainerObject("doc1",
container_name=container1.name,
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne",
source="./default.json")
```
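### Example with copying an existing object
An editor's hedged sketch (the source object name is illustrative, not taken from the provider docs), showing `copy_from` in the `{container}/{object}` form documented below:
```python
import pulumi
import pulumi_openstack as openstack
copy1 = openstack.objectstorage.ContainerObject("copy1",
    container_name="container1",
    copy_from="container1/existing-object",
    region="RegionOne")
```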
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_name: A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
:param pulumi.Input[str] content: A string representing the content of the object. Conflicts with
`source` and `copy_from`.
:param pulumi.Input[str] content_disposition: A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
:param pulumi.Input[str] content_encoding: A string representing the value of the Content-Encoding
metadata.
:param pulumi.Input[str] content_type: A string which sets the MIME type for the object.
:param pulumi.Input[str] copy_from: A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
:param pulumi.Input[int] delete_after: An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
:param pulumi.Input[str] delete_at: A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
:param pulumi.Input[bool] detect_content_type: If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
:param pulumi.Input[str] name: A unique name for the object.
:param pulumi.Input[str] object_manifest: A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
:param pulumi.Input[str] region: The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
:param pulumi.Input[str] source: A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerObjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a V1 container object resource within OpenStack.
## Example Usage
### Example with simple content
```python
import pulumi
import pulumi_openstack as openstack
container1 = openstack.objectstorage.Container("container1",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
doc1 = openstack.objectstorage.ContainerObject("doc1",
container_name=container1.name,
content=\"\"\" {
"foo" : "bar"
}
\"\"\",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
```
### Example with content from file
```python
import pulumi
import pulumi_openstack as openstack
container1 = openstack.objectstorage.Container("container1",
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne")
doc1 = openstack.objectstorage.ContainerObject("doc1",
container_name=container1.name,
content_type="application/json",
metadata={
"test": "true",
},
region="RegionOne",
source="./default.json")
```
:param str resource_name: The name of the resource.
:param ContainerObjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerObjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerObjectArgs.__new__(ContainerObjectArgs)
if container_name is None and not opts.urn:
raise TypeError("Missing required property 'container_name'")
__props__.__dict__["container_name"] = container_name
__props__.__dict__["content"] = content
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_type"] = content_type
__props__.__dict__["copy_from"] = copy_from
__props__.__dict__["delete_after"] = delete_after
__props__.__dict__["delete_at"] = delete_at
__props__.__dict__["detect_content_type"] = detect_content_type
__props__.__dict__["etag"] = etag
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["object_manifest"] = object_manifest
__props__.__dict__["region"] = region
__props__.__dict__["source"] = source
__props__.__dict__["content_length"] = None
__props__.__dict__["date"] = None
__props__.__dict__["last_modified"] = None
__props__.__dict__["trans_id"] = None
super(ContainerObject, __self__).__init__(
'openstack:objectstorage/containerObject:ContainerObject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_length: Optional[pulumi.Input[int]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
date: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
trans_id: Optional[pulumi.Input[str]] = None) -> 'ContainerObject':
"""
Get an existing ContainerObject resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_name: A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
:param pulumi.Input[str] content: A string representing the content of the object. Conflicts with
`source` and `copy_from`.
:param pulumi.Input[str] content_disposition: A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
:param pulumi.Input[str] content_encoding: A string representing the value of the Content-Encoding
metadata.
:param pulumi.Input[int] content_length: If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
:param pulumi.Input[str] content_type: A string which sets the MIME type for the object.
:param pulumi.Input[str] copy_from: A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
:param pulumi.Input[str] date: The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
:param pulumi.Input[int] delete_after: An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
:param pulumi.Input[str] delete_at: A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
:param pulumi.Input[bool] detect_content_type: If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
:param pulumi.Input[str] last_modified: The date and time when the object was last modified. The date and time
stamp format is ISO 8601:
CCYY-MM-DDThh:mm:ss±hh:mm
For example, 2015-08-27T09:49:58-05:00.
The ±hh:mm value, if included, is the time zone as an offset from UTC. In the previous
example, the offset value is -05:00.
:param pulumi.Input[str] name: A unique name for the object.
:param pulumi.Input[str] object_manifest: A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
:param pulumi.Input[str] region: The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
:param pulumi.Input[str] source: A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
:param pulumi.Input[str] trans_id: A unique transaction ID for this request. Your service provider might
need this value if you report a problem.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ContainerObjectState.__new__(_ContainerObjectState)
__props__.__dict__["container_name"] = container_name
__props__.__dict__["content"] = content
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_length"] = content_length
__props__.__dict__["content_type"] = content_type
__props__.__dict__["copy_from"] = copy_from
__props__.__dict__["date"] = date
__props__.__dict__["delete_after"] = delete_after
__props__.__dict__["delete_at"] = delete_at
__props__.__dict__["detect_content_type"] = detect_content_type
__props__.__dict__["etag"] = etag
__props__.__dict__["last_modified"] = last_modified
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["object_manifest"] = object_manifest
__props__.__dict__["region"] = region
__props__.__dict__["source"] = source
__props__.__dict__["trans_id"] = trans_id
return ContainerObject(resource_name, opts=opts, __props__=__props__)
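# Editor's note: a hypothetical sketch of adopting an existing object via
# get(); the id value below is illustrative only, and the exact provider ID
# format should be confirmed against the provider's import documentation.
#
#   existing = ContainerObject.get(
#       "doc1",
#       id="container1/default.json")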
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Output[str]:
"""
A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter
def content(self) -> pulumi.Output[Optional[str]]:
"""
A string representing the content of the object. Conflicts with
`source` and `copy_from`.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> pulumi.Output[str]:
"""
A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
"""
return pulumi.get(self, "content_disposition")
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> pulumi.Output[str]:
"""
A string representing the value of the Content-Encoding
metadata.
"""
return pulumi.get(self, "content_encoding")
@property
@pulumi.getter(name="contentLength")
def content_length(self) -> pulumi.Output[int]:
"""
If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
"""
return pulumi.get(self, "content_length")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[str]:
"""
A string which sets the MIME type for the object.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="copyFrom")
def copy_from(self) -> pulumi.Output[Optional[str]]:
"""
A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
"""
return pulumi.get(self, "copy_from")
@property
@pulumi.getter
def date(self) -> pulumi.Output[str]:
"""
The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
"""
return pulumi.get(self, "date")
@property
@pulumi.getter(name="deleteAfter")
def delete_after(self) -> pulumi.Output[Optional[int]]:
"""
An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
"""
return pulumi.get(self, "delete_after")
@property
@pulumi.getter(name="deleteAt")
def delete_at(self) -> pulumi.Output[str]:
"""
A string representing the date when the system removes the object.
For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
"""
return pulumi.get(self, "delete_at")
@property
@pulumi.getter(name="detectContentType")
def detect_content_type(self) -> pulumi.Output[Optional[bool]]:
"""
If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
"""
return pulumi.get(self, "detect_content_type")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The date and time when the object was last modified. The date and time
stamp format is ISO 8601:
CCYY-MM-DDThh:mm:ss±hh:mm
For example, 2015-08-27T09:49:58-05:00.
The ±hh:mm value, if included, is the time zone as an offset from UTC. In the previous
example, the offset value is -05:00.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name for the object.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectManifest")
def object_manifest(self) -> pulumi.Output[str]:
"""
A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
"""
return pulumi.get(self, "object_manifest")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to create the object. If
omitted, the `region` argument of the provider is used. Changing this
creates a new object.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[str]]:
"""
A string representing the local path of a file which will be used
as the object's content. Conflicts with `content` and `copy_from`.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="transId")
def trans_id(self) -> pulumi.Output[str]:
"""
A unique transaction ID for this request. Your service provider might
need this value if you report a problem.
"""
return pulumi.get(self, "trans_id")
| 46.271228 | 134 | 0.634561 | 7,025 | 56,127 | 4.930961 | 0.044982 | 0.069544 | 0.071132 | 0.074307 | 0.939636 | 0.929879 | 0.920727 | 0.915242 | 0.911576 | 0.882708 | 0 | 0.009777 | 0.274698 | 56,127 | 1,212 | 135 | 46.309406 | 0.840944 | 0.453881 | 0 | 0.819964 | 1 | 0 | 0.090286 | 0.002081 | 0 | 0 | 0 | 0 | 0 | 1 | 0.167558 | false | 0.001783 | 0.008913 | 0.005348 | 0.278075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
414c48c6e674e2e17655442b6918903ce560a69e | 134 | py | Python | dagmc_h5m_file_inspector/__init__.py | fusion-energy/dagmc_h5m_file_inspector | 921164cf2e3e04871640cebc3422219c2bd50b74 | ["MIT"] | null | null | null | dagmc_h5m_file_inspector/__init__.py | fusion-energy/dagmc_h5m_file_inspector | 921164cf2e3e04871640cebc3422219c2bd50b74 | ["MIT"] | null | null | null | dagmc_h5m_file_inspector/__init__.py | fusion-energy/dagmc_h5m_file_inspector | 921164cf2e3e04871640cebc3422219c2bd50b74 | ["MIT"] | null | null | null |
from .core import get_volumes_from_h5m
from .core import get_materials_from_h5m
from .core import get_volumes_and_materials_from_h5m
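# Editor's note: a hypothetical usage sketch (the file name is illustrative).
# The three helpers re-exported above inspect a DAGMC .h5m geometry file:
#
#   import dagmc_h5m_file_inspector as di
#   volumes = di.get_volumes_from_h5m("dagmc.h5m")
#   materials = di.get_materials_from_h5m("dagmc.h5m")
#   vols_mats = di.get_volumes_and_materials_from_h5m("dagmc.h5m")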
| 26.8 | 52 | 0.880597 | 23 | 134 | 4.652174 | 0.347826 | 0.224299 | 0.392523 | 0.476636 | 0.738318 | 0.448598 | 0 | 0 | 0 | 0 | 0 | 0.024793 | 0.097015 | 134 | 4 | 53 | 33.5 | 0.859504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
414c8298f30a7b847f338288476be8d8195ec872 | 6,227 | py | Python | tests/test_state.py | dexterous/celery-cloudwatch | c894dabf655860275edb1ecd7e1dfac223d78926 | ["MIT"] | 32 | 2015-03-29T17:00:50.000Z | 2021-01-22T23:26:59.000Z | tests/test_state.py | dexterous/celery-cloudwatch | c894dabf655860275edb1ecd7e1dfac223d78926 | ["MIT"] | 9 | 2015-02-15T12:45:12.000Z | 2018-03-29T14:05:39.000Z | tests/test_state.py | dexterous/celery-cloudwatch | c894dabf655860275edb1ecd7e1dfac223d78926 | ["MIT"] | 18 | 2015-03-29T17:00:36.000Z | 2021-12-14T14:53:44.000Z |
import unittest
from celery_cloudwatch.state import State
class TestPickleErrors(unittest.TestCase):
def assert_success(self, s, task_name):
self.assertEquals(s.task_event_sent[task_name], 1)
self.assertEquals(s.task_event_started[task_name], 1)
self.assertEquals(s.task_event_succeeded[task_name], 1)
self.assertEquals(s.task_event_failed[task_name], 0)
def assert_failure(self, s, task_name):
self.assertEquals(s.task_event_sent[task_name], 1)
self.assertEquals(s.task_event_started[task_name], 1)
self.assertEquals(s.task_event_succeeded[task_name], 0)
self.assertEquals(s.task_event_failed[task_name], 1)
def test_success_0_1_2(self):
s = State()
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 1)
self.assertEquals(total_running.get('t', 0), 0)
s.task_started({'uuid': 'a', 'timestamp': 1})
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 1)
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
self.assert_success(s, 't')
def test_success_0_2_1(self):
s = State()
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
s.task_started({'uuid': 'a', 'timestamp': 1})
self.assert_success(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_success_1_0_2(self):
s = State()
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
self.assert_success(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_success_1_2_0(self):
s = State()
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
self.assert_success(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_success_2_0_1(self):
s = State()
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_started({'uuid': 'a', 'timestamp': 1})
self.assert_success(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_success_2_1_0(self):
s = State()
s.task_succeeded({'uuid': 'a', 'timestamp': 2})
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
self.assert_success(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_failure_0_1_2(self):
s = State()
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_failed({'uuid': 'a', 'timestamp': 2})
self.assert_failure(s, 't')
def test_failure_0_2_1(self):
s = State()
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_failed({'uuid': 'a', 'timestamp': 2})
s.task_started({'uuid': 'a', 'timestamp': 1})
self.assert_failure(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_failure_1_0_2(self):
s = State()
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_failed({'uuid': 'a', 'timestamp': 2})
self.assert_failure(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_failure_1_2_0(self):
s = State()
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_failed({'uuid': 'a', 'timestamp': 2})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
self.assert_failure(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_failure_2_0_1(self):
s = State()
s.task_failed({'uuid': 'a', 'timestamp': 2})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
s.task_started({'uuid': 'a', 'timestamp': 1})
self.assert_failure(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
def test_failure_2_1_0(self):
s = State()
s.task_failed({'uuid': 'a', 'timestamp': 2})
s.task_started({'uuid': 'a', 'timestamp': 1})
s.task_sent({'uuid': 'a', 'name': 't', 'timestamp': 0})
self.assert_failure(s, 't')
total_waiting, total_running = s.num_waiting_running_by_task()
self.assertEquals(total_waiting.get('t', 0), 0)
self.assertEquals(total_running.get('t', 0), 0)
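# --- Editor's note ---
# The State class under test is not shown in this file. Below is a minimal,
# hypothetical sketch (NOT the real celery_cloudwatch.state.State) of an
# implementation that satisfies the observable API exercised by the tests
# above: per-task-name event counters plus num_waiting_running_by_task(),
# tolerant of the sent/started/succeeded/failed events arriving in any order.
from collections import Counter

class SketchState:
    def __init__(self):
        self.task_event_sent = Counter()
        self.task_event_started = Counter()
        self.task_event_succeeded = Counter()
        self.task_event_failed = Counter()
        self._name = {}     # uuid -> task name (known once 'sent' arrives)
        self._pending = {}  # uuid -> event kinds buffered before 'sent'
        self._phase = {}    # uuid -> 'waiting' | 'running' | 'done'

    def task_sent(self, event):
        uuid = event['uuid']
        self._name[uuid] = event['name']
        self._phase.setdefault(uuid, 'waiting')
        self._count(uuid, 'sent')
        # Replay events that arrived before the task name was known.
        for kind in self._pending.pop(uuid, []):
            self._count(uuid, kind)

    def task_started(self, event):
        self._observe(event, 'started', 'running')

    def task_succeeded(self, event):
        self._observe(event, 'succeeded', 'done')

    def task_failed(self, event):
        self._observe(event, 'failed', 'done')

    def num_waiting_running_by_task(self):
        waiting, running = Counter(), Counter()
        for uuid, phase in self._phase.items():
            name = self._name.get(uuid)
            if name is not None and phase == 'waiting':
                waiting[name] += 1
            elif name is not None and phase == 'running':
                running[name] += 1
        return waiting, running

    def _observe(self, event, kind, phase):
        uuid = event['uuid']
        order = {'waiting': 0, 'running': 1, 'done': 2}
        # Never move a task backwards in its lifecycle.
        if uuid not in self._phase or order[phase] > order[self._phase[uuid]]:
            self._phase[uuid] = phase
        if uuid in self._name:
            self._count(uuid, kind)
        else:
            self._pending.setdefault(uuid, []).append(kind)

    def _count(self, uuid, kind):
        getattr(self, 'task_event_' + kind)[self._name[uuid]] += 1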
| 43.852113 | 70 | 0.605107 | 880 | 6,227 | 4.021591 | 0.042045 | 0.06499 | 0.154281 | 0.040689 | 0.953377 | 0.953377 | 0.948008 | 0.948008 | 0.897146 | 0.897146 | 0 | 0.027027 | 0.215674 | 6,227 | 141 | 71 | 44.163121 | 0.697584 | 0 | 0 | 0.814516 | 0 | 0 | 0.096691 | 0 | 0 | 0 | 0 | 0 | 0.387097 | 1 | 0.112903 | false | 0 | 0.016129 | 0 | 0.137097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
41b6767e1bcfb6a72ecdcfb98423ab0320c75e52 | 1,614 | py | Python | alpyro_msgs/trajectory_msgs/multidofjointtrajectory.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | ["MIT"] | 1 | 2020-12-13T13:07:10.000Z | 2020-12-13T13:07:10.000Z | alpyro_msgs/trajectory_msgs/multidofjointtrajectory.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | ["MIT"] | null | null | null | alpyro_msgs/trajectory_msgs/multidofjointtrajectory.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | ["MIT"] | null | null | null |
from typing import List
from typing_extensions import Annotated
from alpyro_msgs import RosMessage, string
from alpyro_msgs.std_msgs.header import Header
from alpyro_msgs.trajectory_msgs.multidofjointtrajectorypoint import MultiDOFJointTrajectoryPoint
class MultiDOFJointTrajectory(RosMessage):
__msg_typ__ = "trajectory_msgs/MultiDOFJointTrajectory"
__msg_def__ = "c3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogIHVpbnQzMiBzZXEKICB0aW1lIHN0YW1wCiAgc3RyaW5nIGZyYW1lX2lkCnN0cmluZ1tdIGpvaW50X25hbWVzCnRyYWplY3RvcnlfbXNncy9NdWx0aURPRkpvaW50VHJhamVjdG9yeVBvaW50W10gcG9pbnRzCiAgZ2VvbWV0cnlfbXNncy9UcmFuc2Zvcm1bXSB0cmFuc2Zvcm1zCiAgICBnZW9tZXRyeV9tc2dzL1ZlY3RvcjMgdHJhbnNsYXRpb24KICAgICAgZmxvYXQ2NCB4CiAgICAgIGZsb2F0NjQgeQogICAgICBmbG9hdDY0IHoKICAgIGdlb21ldHJ5X21zZ3MvUXVhdGVybmlvbiByb3RhdGlvbgogICAgICBmbG9hdDY0IHgKICAgICAgZmxvYXQ2NCB5CiAgICAgIGZsb2F0NjQgegogICAgICBmbG9hdDY0IHcKICBnZW9tZXRyeV9tc2dzL1R3aXN0W10gdmVsb2NpdGllcwogICAgZ2VvbWV0cnlfbXNncy9WZWN0b3IzIGxpbmVhcgogICAgICBmbG9hdDY0IHgKICAgICAgZmxvYXQ2NCB5CiAgICAgIGZsb2F0NjQgegogICAgZ2VvbWV0cnlfbXNncy9WZWN0b3IzIGFuZ3VsYXIKICAgICAgZmxvYXQ2NCB4CiAgICAgIGZsb2F0NjQgeQogICAgICBmbG9hdDY0IHoKICBnZW9tZXRyeV9tc2dzL1R3aXN0W10gYWNjZWxlcmF0aW9ucwogICAgZ2VvbWV0cnlfbXNncy9WZWN0b3IzIGxpbmVhcgogICAgICBmbG9hdDY0IHgKICAgICAgZmxvYXQ2NCB5CiAgICAgIGZsb2F0NjQgegogICAgZ2VvbWV0cnlfbXNncy9WZWN0b3IzIGFuZ3VsYXIKICAgICAgZmxvYXQ2NCB4CiAgICAgIGZsb2F0NjQgeQogICAgICBmbG9hdDY0IHoKICBkdXJhdGlvbiB0aW1lX2Zyb21fc3RhcnQKCg=="
__md5_sum__ = "ef145a45a5f47b77b7f5cdde4b16c942"
header: Header
joint_names: Annotated[List[string], 0, 0]
points: Annotated[List[MultiDOFJointTrajectoryPoint], 0, 0]
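# Editor's note: a small, hypothetical helper (not part of alpyro_msgs).
# __msg_def__ above is the base64-encoded plain-text ROS message definition;
# it can be recovered with the standard library, e.g.:
#
#   import base64
#   print(base64.b64decode(MultiDOFJointTrajectory.__msg_def__).decode("utf-8"))
#
# which prints the std_msgs/Header, joint_names and points fields in .msg syntax.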
| 100.875 | 1,082 | 0.942999 | 60 | 1,614 | 24.983333 | 0.45 | 0.020013 | 0.028019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102119 | 0.035316 | 1,614 | 15 | 1,083 | 107.6 | 0.860629 | 0 | 0 | 0 | 0 | 0 | 0.703222 | 0.703222 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.416667 | 0 | 1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
41c1f7279ddbf9791bbaa8022fac20b30c80a639 | 7,268 | py | Python | c15/p449_httplib2_test2.py | HiAwesome/dive-into-python3-practice | e57504cb0683ebca9c80b20ff0cb3878bdcc3f87 | ["Apache-2.0"] | null | null | null | c15/p449_httplib2_test2.py | HiAwesome/dive-into-python3-practice | e57504cb0683ebca9c80b20ff0cb3878bdcc3f87 | ["Apache-2.0"] | null | null | null | c15/p449_httplib2_test2.py | HiAwesome/dive-into-python3-practice | e57504cb0683ebca9c80b20ff0cb3878bdcc3f87 | ["Apache-2.0"] | null | null | null |
from pprint import pprint
import httplib2
from file_path_collect import output_cache_path_dir as cache
httplib2.debuglevel = 1
h = httplib2.Http(cache)
# Use jianshu.com as the test URL
jianshu = 'https://www.jianshu.com/'
# The first request primes the cache
response0, content0 = h.request(jianshu)
print()
response, content = h.request(jianshu)
print()
print(len(content))
print()
print(response.status)
print()
print(response.fromcache)
print()
response2, content2 = h.request(jianshu, headers={'cache-control': 'no-cache'})
print()
print(response2.status)
print()
print(response2.fromcache)
print()
pprint(dict(response2.items()))
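# Editor's note on the captured transcript below: the second h.request() is
# served from the cache but still revalidates over the wire (If-None-Match),
# so the debug log shows "304 Not Modified" while response.status is 200 and
# response.fromcache is True. The third request sends cache-control: no-cache,
# forcing a full "200 OK" refetch (fromcache is False).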
"""
connect: (www.jianshu.com, 443)
send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\nif-none-match: W/"bbd77e231f5e58fa82c8623683fdc1a1"\r\n\r\n'
reply: 'HTTP/1.1 304 Not Modified\r\n'
header: Server: Tengine
header: Date: Mon, 27 Apr 2020 01:42:48 GMT
header: Connection: keep-alive
header: X-Frame-Options: SAMEORIGIN
header: X-XSS-Protection: 1; mode=block
header: X-Content-Type-Options: nosniff
header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ;
header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1"
header: Cache-Control: max-age=0, private, must-revalidate
header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/
header: Set-Cookie: read_mode=day; path=/
header: Set-Cookie: default_font=font2; path=/
header: Set-Cookie: locale=zh-CN; path=/
header: X-Request-Id: 7d5de11a-339b-4ef1-a3e6-ecdb96bf70a8
header: X-Runtime: 0.015455
header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload
send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\nif-none-match: W/"bbd77e231f5e58fa82c8623683fdc1a1"\r\n\r\n'
reply: 'HTTP/1.1 304 Not Modified\r\n'
header: Server: Tengine
header: Date: Mon, 27 Apr 2020 01:42:48 GMT
header: Connection: keep-alive
header: X-Frame-Options: SAMEORIGIN
header: X-XSS-Protection: 1; mode=block
header: X-Content-Type-Options: nosniff
header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ;
header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1"
header: Cache-Control: max-age=0, private, must-revalidate
header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/
header: Set-Cookie: read_mode=day; path=/
header: Set-Cookie: default_font=font2; path=/
header: Set-Cookie: locale=zh-CN; path=/
header: X-Request-Id: f5fbdb6f-da87-43e3-a5f4-6724b66d3b5f
header: X-Runtime: 0.006987
header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload
23483
200
True
send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\ncache-control: no-cache\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\n\r\n'
reply: 'HTTP/1.1 200 OK\r\n'
header: Server: Tengine
header: Date: Mon, 27 Apr 2020 01:42:48 GMT
header: Content-Type: text/html; charset=utf-8
header: Transfer-Encoding: chunked
header: Connection: keep-alive
header: Vary: Accept-Encoding
header: X-Frame-Options: SAMEORIGIN
header: X-XSS-Protection: 1; mode=block
header: X-Content-Type-Options: nosniff
header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ;
header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1"
header: Cache-Control: max-age=0, private, must-revalidate
header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/
header: Set-Cookie: read_mode=day; path=/
header: Set-Cookie: default_font=font2; path=/
header: Set-Cookie: locale=zh-CN; path=/
header: X-Request-Id: efd5e128-4c05-4da6-87de-3a832d6375eb
header: X-Runtime: 0.004749
header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload
header: Content-Encoding: gzip
200
False
{'-content-encoding': 'gzip',
'cache-control': 'max-age=0, private, must-revalidate',
'connection': 'keep-alive',
'content-length': '23483',
'content-location': 'https://www.jianshu.com/',
'content-security-policy': "script-src 'self' 'unsafe-inline' 'unsafe-eval' "
'*.jianshu.com *.jianshu.io *.nkscdn.com '
'*.huanqiu.com post.star-media.cn api.geetest.com '
'static.geetest.com dn-staticdown.qbox.me '
'zz.bdstatic.com *.google-analytics.com '
'hm.baidu.com nkscdn.com push.zhanzhang.baidu.com '
'res.wx.qq.com qzonestyle.gtimg.cn '
'as.alipayobjects.com nbrecsys.4paradigm.com '
'shared.ydstatic.com gorgon.youdao.com '
'*.googlesyndication.com adservice.google.com '
"www.googletagservices.com ;style-src 'self' "
"'unsafe-inline' *.jianshu.com *.jianshu.io "
'api.geetest.com static.geetest.com '
'shared.ydstatic.com ;',
'content-type': 'text/html; charset=utf-8',
'date': 'Mon, 27 Apr 2020 01:42:48 GMT',
'etag': 'W/"bbd77e231f5e58fa82c8623683fdc1a1"',
'server': 'Tengine',
'set-cookie': 'signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/, '
'read_mode=day; path=/, default_font=font2; path=/, '
'locale=zh-CN; path=/',
'status': '200',
'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',
'transfer-encoding': 'chunked',
'vary': 'Accept-Encoding',
'x-content-type-options': 'nosniff',
'x-frame-options': 'SAMEORIGIN',
'x-request-id': 'efd5e128-4c05-4da6-87de-3a832d6375eb',
'x-runtime': '0.004749',
'x-xss-protection': '1; mode=block'}
"""
| 49.108108 | 603 | 0.718079 | 1,037 | 7,268 | 5.016393 | 0.190935 | 0.034602 | 0.034602 | 0.032872 | 0.809881 | 0.798923 | 0.79431 | 0.782776 | 0.746444 | 0.702999 | 0 | 0.062688 | 0.130848 | 7,268 | 147 | 604 | 49.442177 | 0.760804 | 0.002477 | 0 | 0.347826 | 0 | 0 | 0.077453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0.652174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
68cbe97d31e3324032f88dccfed54fc66b602a89 | 19,079 | py | Python | src/python/tests/unittests/test_controller/test_scan/test_remote_scanner.py | annihilatethee/seedsync | 7a0ba915cc570bc12916088baa6eb6bee6f291c9 | ["Apache-2.0"] | null | null | null | src/python/tests/unittests/test_controller/test_scan/test_remote_scanner.py | annihilatethee/seedsync | 7a0ba915cc570bc12916088baa6eb6bee6f291c9 | ["Apache-2.0"] | null | null | null | src/python/tests/unittests/test_controller/test_scan/test_remote_scanner.py | annihilatethee/seedsync | 7a0ba915cc570bc12916088baa6eb6bee6f291c9 | ["Apache-2.0"] | null | null | null |
# Copyright 2017, Inderpreet Singh, All rights reserved.
import unittest
import logging
import sys
from unittest.mock import patch, call, ANY
import tempfile
import os
import pickle
import shutil
from controller.scan import RemoteScanner
from ssh import SshcpError
from common import AppError
from common import Localization
class TestRemoteScanner(unittest.TestCase):
temp_dir = None
temp_scan_script = None
def setUp(self):
ssh_patcher = patch('controller.scan.remote_scanner.Sshcp')
self.addCleanup(ssh_patcher.stop)
self.mock_ssh_cls = ssh_patcher.start()
self.mock_ssh = self.mock_ssh_cls.return_value
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
# Ssh to return mangled binary by default
self.mock_ssh.shell.return_value = b'error'
@classmethod
def setUpClass(cls):
TestRemoteScanner.temp_dir = tempfile.mkdtemp(prefix="test_remote_scanner")
TestRemoteScanner.temp_scan_script = os.path.join(TestRemoteScanner.temp_dir, "script")
with open(TestRemoteScanner.temp_scan_script, "w") as f:
f.write("")
@classmethod
def tearDownClass(cls):
shutil.rmtree(TestRemoteScanner.temp_dir)
def test_correctly_initializes_ssh(self):
self.ssh_args = {}
def mock_ssh_ctor(**kwargs):
self.ssh_args = kwargs
self.mock_ssh_cls.side_effect = mock_ssh_ctor
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.assertIsNotNone(scanner)
self.assertEqual("my remote address", self.ssh_args["host"])
self.assertEqual(1234, self.ssh_args["port"])
self.assertEqual("my remote user", self.ssh_args["user"])
self.assertEqual("my password", self.ssh_args["password"])
def test_installs_scan_script_on_first_scan(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns error for md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.mock_ssh.copy.assert_called_once_with(
local_path=TestRemoteScanner.temp_scan_script,
remote_path="/remote/path/to/scan/script"
)
self.mock_ssh.copy.reset_mock()
# should not be called the second time
scanner.scan()
self.mock_ssh.copy.assert_not_called()
def test_appends_script_name_to_remote_path(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan"
)
self.ssh_run_command_count = 0
# Ssh returns error for md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
# check for appended path ('script')
self.mock_ssh.copy.assert_called_once_with(
local_path=TestRemoteScanner.temp_scan_script,
remote_path="/remote/path/to/scan/script"
)
def test_calls_correct_ssh_md5sum_command(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns error for md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.assertEqual(2, self.mock_ssh.shell.call_count)
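# Note: d41d8cd98f00b204e9800998ecf8427e is the md5 digest of the empty
# string, matching the empty temp script written in setUpClass()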
self.mock_ssh.shell.assert_has_calls([
call("echo 'd41d8cd98f00b204e9800998ecf8427e /remote/path/to/scan/script' | md5sum -c --quiet"),
call(ANY)
])
def test_skips_install_on_md5sum_match(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns empty on md5sum, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
return b''
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.mock_ssh.copy.assert_not_called()
self.mock_ssh.copy.reset_mock()
# should not be called the second time either
scanner.scan()
self.mock_ssh.copy.assert_not_called()
def test_installs_scan_script_on_any_md5sum_output(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns non-empty output for the md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
return b'some output from md5sum'
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.mock_ssh.copy.assert_called_once_with(
local_path=TestRemoteScanner.temp_scan_script,
remote_path="/remote/path/to/scan/script"
)
self.mock_ssh.copy.reset_mock()
def test_installs_scan_script_on_md5sum_error(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns error for md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.mock_ssh.copy.assert_called_once_with(
local_path=TestRemoteScanner.temp_scan_script,
remote_path="/remote/path/to/scan/script"
)
self.mock_ssh.copy.reset_mock()
def test_calls_correct_ssh_scan_command(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh returns error for md5sum check, empty pickle dump for later commands
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.assertEqual(2, self.mock_ssh.shell.call_count)
self.mock_ssh.shell.assert_called_with(
"'/remote/path/to/scan/script' '/remote/path/to/scan'"
)
def test_raises_app_error_on_failed_ssh(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh: md5sum check passes, then the scan command raises a non-retryable error
# noinspection PyUnusedLocal
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# md5sum check
return b''
elif self.ssh_run_command_count == 2:
# first try
raise SshcpError("an ssh error")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
with self.assertRaises(AppError) as ctx:
scanner.scan()
self.assertEqual(Localization.Error.REMOTE_SERVER_SCAN, str(ctx.exception))
def test_raises_app_error_on_failed_copy(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
# noinspection PyUnusedLocal
def ssh_copy(*args, **kwargs):
raise SshcpError("an scp error")
self.mock_ssh.copy.side_effect = ssh_copy
with self.assertRaises(AppError) as ctx:
scanner.scan()
self.assertEqual(Localization.Error.REMOTE_SERVER_INSTALL, str(ctx.exception))
def test_suppresses_and_retries_on_ssh_error_text_file_busy(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh: md5sum check passes, then the scan raises a retryable error once and succeeds on the retry
# noinspection PyUnusedLocal
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# md5sum check
return b''
elif self.ssh_run_command_count == 2:
# first try
raise SshcpError("bash: /remote/path/to/scan: Text file busy")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.assertEqual(3, self.mock_ssh.shell.call_count)
def test_fails_after_max_retries_on_suppressed_error(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
# noinspection PyUnusedLocal
def ssh_shell(*args):
raise SshcpError("bash: /remote/path/to/scan: Text file busy")
self.mock_ssh.shell.side_effect = ssh_shell
with self.assertRaises(AppError) as ctx:
scanner.scan()
self.assertEqual(Localization.Error.REMOTE_SERVER_SCAN, str(ctx.exception))
# 7 tries: md5sum check + initial try + 5 retries
self.assertEqual(7, self.mock_ssh.shell.call_count)
def test_suppresses_and_retries_on_ssh_error_exchange_identification(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh run command raises error the first time, succeeds the second time
# noinspection PyUnusedLocal
def ssh_run_command(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count < 2:
raise SshcpError("ssh_exchange_identification: read: Connection reset by peer")
else:
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_run_command
scanner.scan()
self.assertEqual(2, self.mock_ssh.shell.call_count)
def test_suppresses_and_retries_on_ssh_error_cannot_create_temp_dir(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh: md5sum check passes, then the scan raises a retryable error once and succeeds on the retry
# noinspection PyUnusedLocal
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# md5sum check
return b''
elif self.ssh_run_command_count == 2:
# first try
raise SshcpError("[23033] INTERNAL ERROR: cannot create temporary directory!")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.assertEqual(3, self.mock_ssh.shell.call_count)
def test_suppresses_and_retries_on_ssh_error_connection_timed_out(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
self.ssh_run_command_count = 0
# Ssh: md5sum check passes, then the scan raises a retryable error once and succeeds on the retry
# noinspection PyUnusedLocal
def ssh_shell(*args):
self.ssh_run_command_count += 1
if self.ssh_run_command_count == 1:
# md5sum check
return b''
elif self.ssh_run_command_count == 2:
# first try
raise SshcpError("connect to host host.remote.com port 2202: Connection timed out")
else:
# later tries
return pickle.dumps([])
self.mock_ssh.shell.side_effect = ssh_shell
scanner.scan()
self.assertEqual(3, self.mock_ssh.shell.call_count)
def test_raises_app_error_on_mangled_output(self):
scanner = RemoteScanner(
remote_address="my remote address",
remote_username="my remote user",
remote_password="my password",
remote_port=1234,
remote_path_to_scan="/remote/path/to/scan",
local_path_to_scan_script=TestRemoteScanner.temp_scan_script,
remote_path_to_scan_script="/remote/path/to/scan/script"
)
def ssh_shell(*args):
return "mangled data".encode()
self.mock_ssh.shell.side_effect = ssh_shell
with self.assertRaises(AppError) as ctx:
scanner.scan()
self.assertEqual(Localization.Error.REMOTE_SERVER_SCAN, str(ctx.exception))
| 37.780198
| 108
| 0.62498
| 2,277
| 19,079
| 4.940711
| 0.087835
| 0.047467
| 0.079111
| 0.103822
| 0.819644
| 0.8128
| 0.804
| 0.798844
| 0.795556
| 0.791378
| 0
| 0.012518
| 0.292363
| 19,079
| 504
| 109
| 37.855159
| 0.820754
| 0.0836
| 0
| 0.736842
| 0
| 0
| 0.127811
| 0.041934
| 0
| 0
| 0
| 0
| 0.076316
| 1
| 0.092105
| false
| 0.044737
| 0.031579
| 0.002632
| 0.181579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ec4d3bc03ac74801721429d9a793e9e024b4d1a7
| 47
|
py
|
Python
|
model/cct/__init__.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | 1
|
2022-03-25T06:48:16.000Z
|
2022-03-25T06:48:16.000Z
|
model/cct/__init__.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | null | null | null |
model/cct/__init__.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | null | null | null |
from .cct import cct_14_7x2_384, cct_14_7x2_224
| 47
| 47
| 0.87234
| 11
| 47
| 3.181818
| 0.636364
| 0.285714
| 0.457143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.325581
| 0.085106
| 47
| 1
| 47
| 47
| 0.488372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
6b4dff3f5193ecca8a9ee7f92cb74b87ef0e16b0
| 46,119
|
py
|
Python
|
gazenet/models/shared_components/gmu/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | 1
|
2021-09-22T07:50:39.000Z
|
2021-09-22T07:50:39.000Z
|
gazenet/models/shared_components/gmu/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | null | null | null |
gazenet/models/shared_components/gmu/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | 1
|
2022-01-14T22:55:38.000Z
|
2022-01-14T22:55:38.000Z
|
# -*- coding: utf-8 -*-
"""This module implements the Gated Multimodal Units in PyTorch
There are currently two versions: the general GMU and the simplified,
bimodal unit, both described in Arevalo et al., Gated multimodal networks, 2020
(https://link.springer.com/article/10.1007/s00521-019-04559-1)
The published code of the authors contains an implementation
of the bimodal version in the Theano framework Bricks.
However, this version is a bit restrictive: it constrains
the input size to match the hidden size.
See https://github.com/johnarevalo/gmu-mmimdb/blob/master/model.py
The general GMU and the bimodal version with tied gates
are implemented here as GMU and GBU.
A Conv2d version of the GMU is also included here.
"""
import torch
class GMU(torch.nn.Module):
"""Gated Multimodal Unit, a hidden unit in a neural network that learns
to combine the representation of different modalities into a single one
via gates (similar to LSTM).
h generally refers to the hidden state (i.e. modality information, this is
the naming scheme chosen by the original GMU authors, but I do not like it
that much), while
z generally refers to the gates.
"""
def __init__(
self,
in_features,
out_features,
modalities,
activation=torch.tanh,
gate_activation=torch.sigmoid,
hidden_weight_init=lambda x: torch.nn.init.uniform_(x, -0.01, 0.01),
gate_weight_init=lambda x: torch.nn.init.uniform_(x, -0.01, 0.01),
gate_hidden_interaction=lambda x, y: x * y,
gate_transformation=None,
bias=True,
):
"""Init function.
Args:
in_features (int): vector length of a single modality
out_features (int): number of (hidden) units / output features
modalities (int): number of modalities
activation (torch func): activation function for the modalities
gate_activation (torch func): activation function for the gate
hidden_weight_init (torch init func): init method for the neuronal
weights
gate_weight_init (torch init func): init method for the gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state,
e.g. normalise / gain control them by
lambda x: x / torch.sum(x, 1, keepdim=True)
bias (bool): should the computation contain a
bias (not specified in the original paper)
"""
super(GMU, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.modalities = modalities
self.gates = modalities
self.activation = activation
self.gate_activation = gate_activation
self.hidden_weight_init = hidden_weight_init
self.gate_weight_init = gate_weight_init
self.hidden_bias_init = lambda x: torch.nn.init.uniform_(x, -0.01, 0.01)
self.gate_bias_init = lambda x: torch.nn.init.uniform_(x, -0.01, 0.01)
self.gate_hidden_interaction = gate_hidden_interaction
self.gate_transformation = gate_transformation
self.W_h = self.initialize_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.register_bias(bias)
def register_bias(self, bias):
""" register biases """
if bias:
self.hidden_bias = self.initialize_hidden_bias()
self.gate_bias = self.initialize_gate_bias()
else:
self.register_parameter("hidden_bias", None)
self.register_parameter("gate_bias", None)
def initialize_hidden_bias(self):
"""Initializes hidden weight parameters
Returns:
torch.nn.Parameter
"""
b = torch.nn.Parameter(torch.empty((1, self.modalities, self.out_features)))
self.hidden_bias_init(b)
return b
def initialize_gate_bias(self):
"""Initializes hidden weight parameters
Returns:
torch.nn.Parameter
"""
b = torch.nn.Parameter(torch.empty((1, self.gates, self.out_features)))
self.gate_bias_init(b)
return b
def initialize_hidden_weights(self):
"""Initializes hidden weight parameters
Returns:
torch.nn.Parameter
"""
# each neuron only receives the information of its associated modality
W = torch.nn.Parameter(
torch.empty((1, self.modalities, self.in_features, self.out_features))
)
return self.hidden_weight_init(W)
def initialize_gate_weights(self):
"""Initializes gate weight parameters
Returns:
torch.nn.Parameter
"""
# each gate gets the information of all modalities
W = torch.nn.Parameter(
torch.empty(
(
self.modalities * self.in_features,
self.gates * self.out_features,
)
)
)
return self.gate_weight_init(W)
@staticmethod
def check_input(inputs):
"""Checks if the input is already a Torch tensor,
if it is a list or tuple (hopefully one of the two),
stack them into a tensor
Args:
inputs: input to the layer/cell
Returns:
Torch tensor of size (N,C,self.in_features)
"""
if not isinstance(inputs, torch.Tensor):
inputs = torch.stack(inputs, 1)
return inputs
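# e.g. a list of three (N, in_features) tensors becomes a single
# (N, 3, in_features) tensor via torch.stack(inputs, 1)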
def get_modality_activation(self, inputs):
"""Processes the the modality information separately with a set of weights
Args:
inputs: input to the layer/cell
Returns:
Torch tensor of size (N,self.modalities,self.out_features)
"""
h = torch.sum(inputs.unsqueeze(-1) * self.W_h, -2)
if self.hidden_bias is not None:
h += self.hidden_bias
h = self.activation(h)
return h
def get_gate_activation(self, inputs):
"""Processes the modality information separately with a set of weights
Args:
inputs: input to the layer/cell
Returns:
Torch tensor of size (N,self.gates,self.out_features)
"""
z = torch.matmul(inputs.view(-1, self.in_features * self.modalities), self.W_z)
if self.gate_bias is not None:
z = z.view(-1, self.gates, self.out_features) + self.gate_bias
z = self.gate_activation(z)
return z
def forward(self, inputs):
"""Calculates the output of the unit
Args:
inputs (torch.Tensors): consisting of
multiple modalities as torch.Tensors in the form NCH.
N is batch size, C is the modalities and H the length
of the modality vectors.
Returns:
A tuple of torch.Tensor of size (N, self.out_features)
"""
inputs = self.check_input(inputs)
h = self.get_modality_activation(inputs)
z = self.get_gate_activation(inputs)
if self.gate_transformation is not None:
z = self.gate_transformation(z)
return torch.sum(self.gate_hidden_interaction(h, z), 1), (h, z)
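# A minimal usage sketch (hypothetical names, shapes assumed from the
# docstrings above): fusing three 16-dim modality vectors into 8 features.
#   gmu = GMU(in_features=16, out_features=8, modalities=3)
#   fused, (h, z) = gmu(torch.randn(4, 3, 16))  # fused: (4, 8)
# With the defaults this computes h_i = tanh(W_h_i x_i),
# z = sigmoid(W_z [x_1;...;x_C]) and output = sum_i z_i * h_i.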
class GBU(GMU):
"""Gated Bimodal Unit, a hidden unit in a neural network that learns
to combine the representation of two modalities into a single one
via a single gate. See GMU for more general information.
h generally refers to the hidden state, while
z generally refers to the gate.
Note: Since this is a specialised subclass of the GMU, most of the
general behaviour is handled in the GMU class
"""
def __init__(
self,
in_features,
out_features,
activation=None,
gate_activation=None,
hidden_weight_init=None,
gate_weight_init=None,
gate_hidden_interaction=None,
gate_transformation=lambda x: torch.cat((x, (1 - x)), 1),
bias=True,
):
"""Init function.
Args:
out_features (int): number of (hidden) units / output features
in_features (int): vector length of a single modality
activation (torch func, optional): activation function for the
modalities
gate_activation (torch func, optional): activation function for the
gate
hidden_weight_init (torch init func, optional): init method for the
neuronal weights
gate_weight_init (torch init func, optional): init method for the
gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state,
here in the bimodal case, it just concats the activations
with the complementary probabilities
Notes:
# TODO hidden bias inheritance
"""
super(GBU, self).__init__(
in_features=in_features, out_features=out_features, modalities=2
)
if activation:
self.activation = activation
if gate_activation:
self.gate_activation = gate_activation
if hidden_weight_init:
self.hidden_weight_init = hidden_weight_init
if gate_weight_init:
self.gate_weight_init = gate_weight_init
if gate_hidden_interaction:
self.gate_hidden_interaction = gate_hidden_interaction
self.gate_transformation = gate_transformation
self.gates = 1
self.W_h = self.initialize_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.register_bias(bias)
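# A minimal usage sketch (hypothetical names): the single gate z weighs the
# two modalities as z * h_1 + (1 - z) * h_2, which the gate_transformation
# lambda above realises by concatenating (z, 1 - z) along dim 1.
#   gbu = GBU(in_features=16, out_features=8)
#   fused, (h, z) = gbu(torch.randn(4, 2, 16))  # fused: (4, 8)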
class RGMU(GMU):
"""Recurrent Gated Multimodal Unit, a hidden unit in a neural network that
learns to combine the representation of several modalities into a single one
incorporating recurrent activation over time. See GMU for more general
information.
h generally refers to the hidden state, while
z generally refers to the gate.
h_l are the lateral information from the hidden state, while
z_l are the lateral information from the gate, i.e. the activations
from the last timestep.
Note: Since this is a specialised subclass of the GMU, most of the
general behaviour is handled in the GMU class
"""
def __init__(
self,
in_features,
out_features,
modalities,
recurrent_modalities=True,
recurrent_gates=True,
activation=None,
gate_activation=None,
hidden_weight_init=None,
lateral_hidden_weight_init=None,
gate_weight_init=None,
lateral_gate_weight_init=None,
gate_hidden_interaction=None,
gate_transformation=None,
batch_first=True,
bias=True,
return_sequences=False,
):
"""Init function.
Args:
out_features (int): number of (hidden) units / output features
in_features (int): vector length of a single modality
modalities (int): number of modalities
recurrent_modalities (bool, optional): if modality activation should
incorporate recurrent information
recurrent_gates (bool, optional): if gate activations should
incorporate recurrent information
activation (torch func, optional): activation function for the
modalities
gate_activation (torch func, optional): activation function for the
gate
hidden_weight_init (torch init func, optional): init method for the
neuronal weights
lateral_hidden_weight_init (torch init func, optional): init method
for the recurrent neural weights
gate_weight_init (torch init func, optional): init method for the
gate weights
lateral_gate_weight_init (torch init func, optional): init method
for the recurrent gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state
batch_first (bool): use batch, sequence, feature instead of
sequence, batch, feature
default: True
bias (bool): whether to register bias parameters (see
register_bias and register_recurrent_bias)
return_sequences (bool): if true returns all hidden states
from the intermediate time steps (as a list). The keras/tf
behaviour was the inspiration for that.
"""
super(RGMU, self).__init__(
in_features=in_features,
out_features=out_features,
modalities=modalities,
)
if activation is not None:
self.activation = activation
if gate_activation is not None:
self.gate_activation = gate_activation
if hidden_weight_init is not None:
self.hidden_weight_init = hidden_weight_init
if lateral_hidden_weight_init is None:
self.lateral_hidden_weight_init = hidden_weight_init
else:
self.lateral_hidden_weight_init = lateral_hidden_weight_init
if gate_weight_init is not None:
self.gate_weight_init = gate_weight_init
if lateral_gate_weight_init is None:
self.lateral_gate_weight_init = gate_weight_init
else:
self.lateral_gate_weight_init = lateral_gate_weight_init
if gate_hidden_interaction is not None:
self.gate_hidden_interaction = gate_hidden_interaction
self.gate_transformation = gate_transformation
self.recurrent_modalities = recurrent_modalities
self.recurrent_gates = recurrent_gates
self.W_h = self.initialize_hidden_weights()
self.W_h_l = self.initialize_lateral_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.W_z_l = self.initialize_lateral_gate_weights()
self.batch_first = batch_first
self.register_bias(bias)
self.register_recurrent_bias(bias)
self.return_sequences = return_sequences
def register_recurrent_bias(self, bias):
""" register recurrent biases """
if bias:
self.recurrent_hidden_bias = self.initialize_hidden_bias()
self.recurrent_gate_bias = self.initialize_gate_bias()
else:
self.register_parameter("recurrent_hidden_bias", None)
self.register_parameter("recurrent_gate_bias", None)
def initialize_lateral_state(self, batch_size=1):
"""Initializes lateral state for the first forward pass
Returns:
a tuple of torch.Tensors
"""
h_l = torch.zeros((batch_size, self.modalities, self.out_features), device=self.W_h.device)
z_l = torch.zeros((batch_size, self.gates, self.out_features), device=self.W_h.device)
return h_l, z_l
def initialize_lateral_hidden_weights(self):
"""Initializes lateral hidden weights
Returns:
torch.nn.Parameter
"""
W = torch.nn.Parameter(torch.empty((self.modalities, self.out_features)))
if self.lateral_hidden_weight_init is not None:
self.lateral_hidden_weight_init(W)
return W
def initialize_lateral_gate_weights(self):
"""Initializes lateral gate weight parameters
Returns:
torch.nn.Parameter
"""
W = torch.nn.Parameter(torch.empty((self.gates, self.out_features)))
if self.lateral_gate_weight_init is not None:
self.lateral_gate_weight_init(W)
return W
def get_recurrent_modality_activation(self, inputs, h_l):
"""Processes the the modality information separately with a set of
weights and the weighted recurrent information from the last timestep
Args:
inputs (Torch.Tensor): input to the layer/cell
h_l (Torch.Tensor): activations of the last timestep
Returns:
Torch tensor of size (N,self.modalities,self.out_features)
"""
h = torch.sum(inputs.unsqueeze(-1) * self.W_h, -2) + self.W_h_l * h_l
if self.recurrent_hidden_bias is not None:
h += self.hidden_bias + self.recurrent_hidden_bias
return self.activation(h)
def get_recurrent_gate_activation(self, inputs, z_l):
"""Processes the gate information separately with a set of weights
and the weighted recurrent information from the last timestep
Args:
inputs (Torch.Tensor): input to the layer/cell
z_l (Torch.Tensor): activations of the last timestep
Returns:
Torch tensor of size (N,self.modalities,self.out_features)
"""
z = (
torch.matmul(inputs.view(-1, self.in_features * self.modalities), self.W_z)
+ (self.W_z_l.unsqueeze(0) * z_l).view(-1, self.gates * self.out_features)
).view(-1, self.gates, self.out_features)
if self.recurrent_gate_bias is not None:
z += self.gate_bias + self.recurrent_gate_bias
z = self.gate_activation(z)
if self.gate_transformation is not None:
z = self.gate_transformation(z)
return z
def step(self, inputs, lateral):
"""Calculates the output of one timestep, depending on which of the
parts are recurrent, either modalities, gates or both
Args:
inputs (torch.Tensors): consisting of
multiple modalities as torch.Tensors in the form NCH.
N is batch size, C is the modalities and H the length
of the modality vectors.
lateral (tuple of torch.Tensors): tuple consisting of both,
recurrent modality activations and recurrent gate
activations
Returns:
A tuple of (torch.Tensor of size (N, self.out_features) and
a tuple of (modality and gate activations)).
"""
inputs = self.check_input(inputs)
h_l, z_l = lateral
if self.recurrent_modalities:
h = self.get_recurrent_modality_activation(inputs, h_l)
else:
h = self.get_modality_activation(inputs)
if self.recurrent_gates:
z = self.get_recurrent_gate_activation(inputs, z_l)
else:
z = self.get_gate_activation(inputs)
return torch.sum(self.gate_hidden_interaction(h, z), 1), (h, z)
def forward(self, inputs, lateral=None):
"""Applies the layer computation to the whole sequence
Args:
inputs (torch.Tensors): consisting of
multiple modalities as torch.Tensors in the form
if batch_first: NSCH.
N is batch size, S is sequence, C is the modalities and H the
length
of the modality vectors
else: SNCH
lateral (tuple of torch.Tensors): tuple consisting of both,
recurrent modality activations and recurrent gate
activations, if none is supplied, the lateral is initialized as zeros
Returns:
A tuple of (torch.Tensor of size (N, self.out_features) and
a tuple of (modality (N,modalities,self.out_features) and gate
activations (N,gates,self.out_features)).
If return_sequences, then we follow the batch_first approach,
where the dimensions are N, sequences, self.out_features.
The lateral tuples will simply be in a list (for now).
"""
if lateral is None:
lateral = self.initialize_lateral_state()
if self.return_sequences:
output_sequences = []
lateral_sequences = []
if self.batch_first:
for i in range(inputs.shape[1]):
output, lateral = self.step(inputs[:, i], lateral)
if self.return_sequences:
output_sequences.append(output)
lateral_sequences.append(lateral)
else:
for data in inputs:
output, lateral = self.step(data, lateral)
if self.return_sequences:
output_sequences.append(output)
lateral_sequences.append(lateral)
if self.return_sequences:
return torch.stack(output_sequences, 1), lateral_sequences
else:
return output, lateral
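# A minimal usage sketch (hypothetical names): with batch_first=True the
# unit unrolls over the sequence dimension S of an (N, S, C, H) input.
#   rgmu = RGMU(in_features=16, out_features=8, modalities=3,
#               return_sequences=True)
#   seq_out, laterals = rgmu(torch.randn(4, 10, 3, 16))  # seq_out: (4, 10, 8)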
class GMUConv2d(torch.nn.Module):
"""Gated Multimodal Unit, a hidden unit in a neural network that learns
to combine the representation of different modalities into a single one
via gates (similar to LSTM).
Here, a specialised version is used that takes as input feature maps,
or general 2d input, convolves over these maps and subsequently,
outputs feature maps.
The only real difference to the non-conv versions is that the states
and values of the units are feature maps and not scalars.
h generally refers to the hidden state, while
z generally refers to the gates.
"""
def __init__(
self,
in_channels,
out_channels,
modalities,
kernel_size,
stride=1,
padding=0,
dilation=1,
activation=torch.tanh,
gate_activation=torch.sigmoid,
hidden_weight_init=lambda x: torch.nn.init.uniform_(x, -0.01, 0.01),
gate_weight_init=lambda x: torch.nn.init.uniform_(x, -0.01, 0.01),
gate_hidden_interaction=lambda x, y: x * y,
gate_transformation=None,
bias=True,
):
"""Init function.
Args:
in_channels (int): number of input channels of each modality
out_channels (int): number of (hidden) units / output feature maps
modalities (int): number of modalities
activation (torch func): activation function for the modalities
gate_activation (torch func): activation function for the gate
hidden_weight_init (torch init func): init method for the neuronal weights
gate_weight_init (torch init func): init method for the gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state,
e.g. normalise / gain control them by
lambda x: x / torch.sum(x, 1, keepdim=True)
Note: at the moment, the input feature maps have to be uniform in
the channel dimension,
i.e. they all have to have the same number of channels.
"""
super(GMUConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modalities = modalities
self.gates = modalities
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.activation = activation
self.gate_activation = gate_activation
self.hidden_weight_init = hidden_weight_init
self.gate_weight_init = gate_weight_init
self.hidden_bias_init = lambda x: torch.nn.init.uniform_(
x, -0.01, 0.01
) # TODO: expose as a keyword argument?
self.gate_bias_init = lambda x: torch.nn.init.uniform_(x, -0.01, 0.01)
self.gate_hidden_interaction = gate_hidden_interaction
self.gate_transformation = gate_transformation
self.W_h = self.initialize_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.register_bias(bias)
def register_bias(self, bias):
if bias:
self.hidden_bias = self.initialize_hidden_bias()
self.gate_bias = self.initialize_gate_bias()
else:
self.register_parameter("hidden_bias", None)
self.register_parameter("gate_bias", None)
def initialize_hidden_bias(self):
"""
Returns:
torch.nn.Parameter
"""
b = torch.nn.Parameter(torch.empty((self.modalities * self.out_channels)))
self.hidden_bias_init(b)
return b
def initialize_gate_bias(self):
"""
Returns:
torch.nn.Parameter
"""
b = torch.nn.Parameter(torch.empty((self.gates * self.out_channels)))
self.gate_bias_init(b)
return b
def initialize_gate_weights(self):
"""Initializes gate weight/kernel parameters
Returns:
torch.nn.Parameter
"""
# each gate gets the information of all modalities
# one gate per modality
W = torch.nn.Parameter(
torch.empty(
(
self.gates * self.out_channels,
self.modalities * self.in_channels,
self.kernel_size,
self.kernel_size,
)
)
)
if self.gate_weight_init is not None:
self.gate_weight_init(W)
return W
def initialize_hidden_weights(self):
"""Initializes hidden weight/kernel parameters
Returns:
torch.nn.Parameter
"""
# each neuron only receives the information of its associated modality
W = torch.nn.Parameter(
torch.empty(
(
self.modalities * self.out_channels,
self.in_channels,
self.kernel_size,
self.kernel_size,
)
)
)
if self.hidden_weight_init is not None:
self.hidden_weight_init(W)
return W
def get_modality_activation(self, inputs):
"""Processes the modality information separately with a set of weights
Notes:
The groups parameter is a bit poorly documented. It works as follows: https://mc.ai/how-groups-work-in-pytorch-convolutions/
Args:
inputs: input feature map to the layer/cell
Returns:
Torch tensor of size (N,self.modalities,self.out_channels, *h, *w)
The *height and *weight are determined by the input size and the
use of padding,
dilation, stride etc.
"""
h = self.activation(
torch.nn.functional.conv2d(
inputs,
self.W_h,
self.hidden_bias,
self.stride,
self.padding,
self.dilation,
self.modalities,
)
)
return h.view(h.shape[0], self.modalities, -1, h.shape[-2], h.shape[-1])
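# Note on the groups argument above: with groups=self.modalities each
# modality's input channels are convolved only against its own block of
# output channels, so no information mixes across modalities in this path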
def get_gate_activation(self, inputs):
"""Processes the modality information with a set of weights (modalities are not treated separately but together)
Args:
inputs: input feature map to the layer/cell
Returns:
Torch tensor of size (N,self.gates,self.out_channels, *h, *w)
The *height and *weight are determined by the input size and the
use of padding,
dilation, stride etc.
"""
z = self.gate_activation(
torch.nn.functional.conv2d(
inputs,
self.W_z,
self.gate_bias,
self.stride,
self.padding,
self.dilation,
1,
)
)
z = z.view(z.shape[0], self.gates, -1, z.shape[-2], z.shape[-1])
if self.gate_transformation is not None:
z = self.gate_transformation(z)
return z
def forward(self, inputs):
"""Calculates the output of the unit
Args:
inputs (tuple of torch.Tensors): input tuple consisting of
multiple modalities as torch.Tensors in the form NCH.
N is batch size, C is the modalities (as in stacked on top of each other, even if they have multiple channels each) and HW are the sizes of the feature map
Returns:
torch.Tensor of size (N, out_channels, *h, *w)
The *height and *weight are determined by the input size and the
use of padding,
dilation, stride etc.
"""
inputs = GMU.check_input(inputs)
h = self.get_modality_activation(inputs)
z = self.get_gate_activation(inputs)
return torch.sum(self.gate_hidden_interaction(h, z), 1), (h, z)
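# A minimal usage sketch (hypothetical names): fusing three single-channel
# 5x5 feature maps with 3x3 kernels.
#   gmu2d = GMUConv2d(in_channels=1, out_channels=2, modalities=3,
#                     kernel_size=3)
#   fused, (h, z) = gmu2d(torch.randn(8, 3, 5, 5))  # fused: (8, 2, 3, 3)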
class GBUConv2d(GMUConv2d):
"""Gated Multimodal Unit, a hidden unit in a neural network that learns
to combine the representation of different modalities into a single one
via gates (similar to LSTM).
Here, a specialised version is used that takes as input feature maps,
or general 2d input, convolves over these maps and subsequently,
outputs feature maps.
The only real difference to the non-conv versions is that the states
and values of the units are feature maps and not scalars.
GBU here indicates that only two modalities are possible for input
and only one gate is used.
h generally refers to the hidden state, while
z generally refers to the gates.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=None,
padding=None,
dilation=None,
activation=None,
gate_activation=None,
hidden_weight_init=None,
gate_weight_init=None,
gate_hidden_interaction=None,
gate_transformation=lambda x: torch.cat((x, (1 - x)), 1),
bias=True,
):
"""Init function.
Args:
in_channels (int): number of input channels of each modality
out_channels (int): number of (hidden) units / output feature maps
activation (torch func): activation function for the modalities
gate_activation (torch func): activation function for the gate
hidden_weight_init (torch init func): init method for the neuronal weights
gate_weight_init (torch init func): init method for the gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state,
here in the bimodal case, it just concats the activations
with the complementary probabilities
Note: at the moment, the input feature maps have to be uniform in
the channel dimension,
i.e. they all have to have the same number of channels.
"""
super(GBUConv2d, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
modalities=2,
kernel_size=kernel_size,
)
if stride is not None:
self.stride = stride
if padding is not None:
self.padding = padding
if dilation is not None:
self.dilation = dilation
if activation is not None:
self.activation = activation
if gate_activation is not None:
self.gate_activation = gate_activation
if hidden_weight_init is not None:
self.hidden_weight_init = hidden_weight_init
if gate_weight_init is not None:
self.gate_weight_init = gate_weight_init
if gate_hidden_interaction is not None:
self.gate_hidden_interaction = gate_hidden_interaction
if gate_transformation is not None:
self.gate_transformation = gate_transformation
self.gates = 1
self.W_h = self.initialize_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.register_bias(bias)
class RGMUConv2d(GMUConv2d):
"""Recurrent Gated Multimodal Unit, a hidden unit in a neural network that
learns to combine the representation of different modalities into a
single one via gates (similar to LSTM).
Here, a specialised version is used that takes as input feature maps,
or general 2d input, convolves over these maps and subsequently,
outputs feature maps.
The only real difference to the non-conv versions is that the states
and values of the units are feature maps and not scalars.
Recurrent means that either the gates or the modalities, or both,
incorporate information from prior timesteps in their processing.
h generally refers to the hidden state, while
z generally refers to the gates.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
modalities,
input_size,
recurrent_modalities=True,
recurrent_gates=True,
stride=None,
padding=None,
dilation=None,
activation=None,
gate_activation=None,
hidden_weight_init=None,
lateral_hidden_weight_init=None,
gate_weight_init=None,
lateral_gate_weight_init=None,
gate_hidden_interaction=None,
gate_transformation=None,
batch_first=True,
return_sequences=False,
bias=True,
device="cuda:0",
):
"""Init function.
Args:
in_channels (int): number of input channels of each modality
out_channels (int): number of (hidden) units / output feature maps
modalities (int): number of modalities
input_size (list or tuple): height and width of the input
recurrent_modalities (bool, optional): if modality activation should
incorporate recurrent information
recurrent_gates (bool, optional): if gate activations should
incorporate recurrent information
activation (torch func): activation function for the modalities
gate_activation (torch func): activation function for the gate
hidden_weight_init (torch init func): init method for the neuronal weights
lateral_hidden_weight_init (torch init func, optional): init method
for the recurrent neural weights
gate_weight_init (torch init func): init method for the gate weights
lateral_gate_weight_init (torch init func, optional): init method
for the recurrent gate weights
gate_hidden_interaction (lambda func): how h and z interact
with one another. Could be linear or non-linear (e.g. x * (1+y))
gate_transformation (lambda func): processes the gate
activations before they interact with the hidden state
device (string): gpu or cpu device
Note: at the moment, the input feature maps have to be uniform in
the channel dimension,
i.e. they all have to have the same number of channels.
"""
super(RGMUConv2d, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
modalities=modalities,
kernel_size=kernel_size,
)
self.device = device
self.height = input_size[0]
self.width = input_size[1]
if stride is not None:
self.stride = stride
if padding is not None:
self.padding = padding
if dilation is not None:
self.dilation = dilation
if activation is not None:
self.activation = activation
if gate_activation is not None:
self.gate_activation = gate_activation
if hidden_weight_init is not None:
self.hidden_weight_init = hidden_weight_init
if lateral_hidden_weight_init is not None:
self.lateral_hidden_weight_init = lateral_hidden_weight_init
else:
self.lateral_hidden_weight_init = self.hidden_weight_init
if gate_weight_init is not None:
self.gate_weight_init = gate_weight_init
if lateral_gate_weight_init is not None:
self.lateral_gate_weight_init = lateral_gate_weight_init
else:
self.lateral_gate_weight_init = self.gate_weight_init
if gate_hidden_interaction is not None:
self.gate_hidden_interaction = gate_hidden_interaction
if gate_transformation is not None:
self.gate_transformation = gate_transformation
self.gates = modalities
self.recurrent_modalities = recurrent_modalities
self.recurrent_gates = recurrent_gates
self.return_sequences = return_sequences
self.batch_first = batch_first
self.W_h = self.initialize_hidden_weights()
self.W_h_l = self.initialize_lateral_hidden_weights()
self.W_z = self.initialize_gate_weights()
self.W_z_l = self.initialize_lateral_gate_weights()
self.register_bias(bias)
self.register_recurrent_bias(bias)
def register_recurrent_bias(self, bias):
if bias:
self.recurrent_hidden_bias = self.initialize_hidden_bias()
self.recurrent_gate_bias = self.initialize_gate_bias()
else:
self.register_parameter("recurrent_hidden_bias", None)
self.register_parameter("recurrent_gate_bias", None)
def initialize_lateral_state(self, batch_size=1):
""" Todo: Docstring"""
h_l = torch.zeros(
(
batch_size,
self.modalities * self.out_channels,
(self.height - self.kernel_size + self.padding * 2) // self.stride + 1,
(self.width - self.kernel_size + self.padding * 2) // self.stride + 1,
)
)
z_l = torch.zeros(
(
batch_size,
self.gates * self.out_channels,
self.height - (self.kernel_size - 1) + self.padding * 2,
self.width - (self.kernel_size - 1) + self.padding * 2,
)
)
return h_l.to(self.device), z_l.to(self.device)
def initialize_lateral_gate_weights(self):
"""Initializes gate weight/kernel parameters
Returns:
torch.nn.Parameter
"""
# the recurrent processing takes as input the output of the gate
# processing, i.e. one feature map per gate, per RGMUCell
W = torch.nn.Parameter(
torch.empty(
(
self.gates * self.out_channels,
1,
self.kernel_size,
self.kernel_size,
)
)
)
if self.gate_weight_init is not None:
self.gate_weight_init(W)
return W
def initialize_lateral_hidden_weights(self):
"""Initializes hidden weight/kernel parameters
Returns:
torch.nn.Parameter
"""
# as input we receive the output of the modality processing,
# i.e. one feature map per modality, per RGMUCell
W = torch.nn.Parameter(
torch.empty(
(
self.modalities * self.out_channels,
1,
self.kernel_size,
self.kernel_size,
)
)
)
if self.hidden_weight_init is not None:
self.hidden_weight_init(W)
return W
def get_recurrent_modality_activation(self, inputs, h_l):
"""Processes the the modality information separately with a set of
weights and the weighted recurrent information from the last timestep
Args:
inputs (Torch.Tensor): input to the layer/cell
h_l (Torch.Tensor): activations of the last timestep
Returns:
Torch tensor of size (N,self.modalities,self.out_channels, *h, *w)
"""
h = self.activation(
torch.nn.functional.conv2d(
inputs,
self.W_h,
self.hidden_bias,
self.stride,
self.padding,
self.dilation,
self.modalities,
)
+ torch.nn.functional.conv2d(
h_l,
self.W_h_l,
self.recurrent_hidden_bias,
self.stride,
(self.kernel_size - 1) // 2,
self.dilation,
self.modalities * self.out_channels,
)
)
return h.view(h.shape[0], self.modalities, -1, h.shape[-2], h.shape[-1])
def get_recurrent_gate_activation(self, inputs, z_l):
"""Processes the gate information separately with a set of weights
and the weighted recurrent information from the last timestep
Args:
inputs (Torch.Tensor): input to the layer/cell
z_l (Torch.Tensor): activations of the last timestep
Returns:
Torch tensor of size (N,self.gates,self.out_channels, *h, *w)
"""
z = self.gate_activation(
torch.nn.functional.conv2d(
inputs,
self.W_z,
self.gate_bias,
self.stride,
self.padding,
self.dilation,
1,
)
+ torch.nn.functional.conv2d(
z_l,
self.W_z_l,
self.recurrent_gate_bias,
self.stride,
(self.kernel_size - 1) // 2,
self.dilation,
self.gates * self.out_channels,
)
)
z = z.view(z.shape[0], self.gates, -1, z.shape[-2], z.shape[-1])
if self.gate_transformation is not None:
z = self.gate_transformation(z)
return z
def step(self, inputs, lateral):
""" Copy from RGMU but adapt to 2D"""
h_l, z_l = lateral
if self.recurrent_modalities:
h = self.get_recurrent_modality_activation(inputs, h_l)
else:
h = self.get_modality_activation(inputs)
if self.recurrent_gates:
z = self.get_recurrent_gate_activation(inputs, z_l)
else:
z = self.get_gate_activation(inputs)
return torch.sum(self.gate_hidden_interaction(h, z), 1), (
h.view(
h.shape[0],
self.modalities * self.out_channels,
h.shape[-2],
h.shape[-1],
),
z.view(
z.shape[0],
self.gates * self.out_channels,
z.shape[-2],
z.shape[-1],
),
)
def forward(self, inputs, lateral=None):
"""# TODO adapt docstring
Args:
inputs (torch.Tensors): consisting of
multiple modalities as torch.Tensors in the form
if batch_first: NSCH.
N is batch size, S is sequence, C is the modalities and H the
length
of the modality vectors
else: SNCH
lateral (tuple of torch.Tensors): tuple consisting of both,
recurrent modality activations and recurrent gate
activations, if none is supplied, the lateral is initialized as zeros
Returns:
A tuple of (torch.Tensor of size (N, self.out_features) and
a tuple of (modality (N,modalities,self.out_features) and gate
activations (N,gates,self.out_features)).
If return_sequences, then we follow the batch_first approach,
where the dimensions are N, sequences, self.out_features.
The lateral tuples will simply be in a list (for now).
"""
if lateral is None:
lateral = self.initialize_lateral_state()
# Note: the lateral state is allocated on self.device, so if the inputs live on a different device this can raise device-mismatch errors on GPU
if self.return_sequences:
output_sequences = []
lateral_sequences = []
if self.batch_first:
for i in range(inputs.shape[1]):
output, lateral = self.step(inputs[:, i], lateral)
if self.return_sequences:
output_sequences.append(output)
lateral_sequences.append(lateral)
else:
for data in inputs:
output, lateral = self.step(data, lateral)
if self.return_sequences:
output_sequences.append(output)
lateral_sequences.append(lateral)
if self.return_sequences:
return torch.stack(output_sequences, 1), lateral_sequences
else:
return output, lateral
if __name__ == "__main__":
# import numpy as np
# np.random.seed(1337)
# from torch.utils.data import Dataset
# import multimodal as mm
rgmuconv_in1_out2_mod3 = RGMUConv2d(
in_channels=1,
out_channels=2,
modalities=3,
kernel_size=3,
input_size=[5, 5],
)
input2d_5x5_c1_mod3_len4 = torch.ones((8, 4, 3, 5, 5))
lat = rgmuconv_in1_out2_mod3.initialize_lateral_state()
h, z = lat
rgmuconv_in1_out2_mod3(input2d_5x5_c1_mod3_len4, lat)
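    # A minimal smoke-test sketch (hypothetical names, shapes assumed from
    # the docstrings above) for the non-convolutional units:
    gmu_in16_out8_mod3 = GMU(in_features=16, out_features=8, modalities=3)
    fused, _ = gmu_in16_out8_mod3(torch.ones((4, 3, 16)))
    assert fused.shape == (4, 8)
    gbu_in16_out8 = GBU(in_features=16, out_features=8)
    fused, _ = gbu_in16_out8(torch.ones((4, 2, 16)))
    assert fused.shape == (4, 8)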
| 37.073151
| 171
| 0.608426
| 5,613
| 46,119
| 4.833779
| 0.07251
| 0.03612
| 0.026832
| 0.014853
| 0.879036
| 0.857106
| 0.834844
| 0.813504
| 0.802521
| 0.768908
| 0
| 0.006827
| 0.323511
| 46,119
| 1,243
| 172
| 37.102977
| 0.862816
| 0.402004
| 0
| 0.736177
| 0
| 0
| 0.005448
| 0.001708
| 0
| 0
| 0
| 0.003218
| 0
| 1
| 0.061611
| false
| 0
| 0.00158
| 0
| 0.121643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b5a716281422ca867c8f8b0a5f1793bab54d58d
| 131
|
py
|
Python
|
chevah/compat/tests/manual/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 5
|
2016-12-03T22:54:50.000Z
|
2021-11-17T11:17:39.000Z
|
chevah/compat/tests/manual/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 76
|
2015-01-22T16:00:31.000Z
|
2022-02-09T22:13:34.000Z
|
chevah/compat/tests/manual/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 1
|
2016-12-10T15:57:31.000Z
|
2016-12-10T15:57:31.000Z
|
"""
Manual tests.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
| 18.714286
| 38
| 0.824427
| 16
| 131
| 5.875
| 0.5625
| 0.319149
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122137
| 131
| 6
| 39
| 21.833333
| 0.817391
| 0.099237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6bc750a167e0f472ce43e3874c6b0afa920d8be1
| 173
|
py
|
Python
|
hottbox/utils/validation/__init__.py
|
adamurban98/hottbox
|
26580018ec6d38a1b08266c04ce4408c9e276130
|
[
"Apache-2.0"
] | 167
|
2018-05-07T10:31:00.000Z
|
2022-02-24T19:20:31.000Z
|
hottbox/utils/validation/__init__.py
|
adamurban98/hottbox
|
26580018ec6d38a1b08266c04ce4408c9e276130
|
[
"Apache-2.0"
] | 19
|
2018-05-10T13:26:39.000Z
|
2020-01-31T12:49:27.000Z
|
hottbox/utils/validation/__init__.py
|
adamurban98/hottbox
|
26580018ec6d38a1b08266c04ce4408c9e276130
|
[
"Apache-2.0"
] | 24
|
2018-04-02T17:16:50.000Z
|
2021-12-07T06:21:40.000Z
|
from .checks import is_toeplitz_matrix, is_super_symmetric, is_toeplitz_tensor
__all__ = [
"is_toeplitz_matrix",
"is_super_symmetric",
"is_toeplitz_tensor",
]
| 19.222222
| 78
| 0.757225
| 22
| 173
| 5.227273
| 0.454545
| 0.347826
| 0.278261
| 0.313043
| 0.834783
| 0.834783
| 0.834783
| 0.834783
| 0.834783
| 0
| 0
| 0
| 0.156069
| 173
| 8
| 79
| 21.625
| 0.787671
| 0
| 0
| 0
| 0
| 0
| 0.312139
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
6bf6ff73615a07ab5bdfdc7cbe3744532b49cd92
| 2,023
|
py
|
Python
|
digit-dataset/k-means.py
|
MasayukiHigashi/CAT
|
b2690e8d7b8bf0d36422bb9be5feb693d8639c32
|
[
"MIT"
] | 19
|
2019-07-31T03:21:03.000Z
|
2021-11-15T12:33:42.000Z
|
digit-dataset/k-means.py
|
MasayukiHigashi/CAT
|
b2690e8d7b8bf0d36422bb9be5feb693d8639c32
|
[
"MIT"
] | 3
|
2019-08-04T04:11:13.000Z
|
2021-01-20T14:47:23.000Z
|
digit-dataset/k-means.py
|
MasayukiHigashi/CAT
|
b2690e8d7b8bf0d36422bb9be5feb693d8639c32
|
[
"MIT"
] | 9
|
2019-08-03T15:20:37.000Z
|
2021-04-10T22:28:48.000Z
|
import numpy as np
from sklearn.cluster import KMeans

num_classes = 10
dirr = "./"


def cluster_accuracy(data, k=num_classes):
    """Cluster the pooled features with k-means, then score each cluster
    by its majority ground-truth label (cluster purity)."""
    x = np.concatenate([data['x1'], data['x2']], 0)
    y = np.concatenate([data['y1'], data['y2']], 0)
    # n_jobs was removed from KMeans in scikit-learn 1.0, so it is no
    # longer passed here; it only controlled parallelism, not results.
    pred = KMeans(n_clusters=k).fit_predict(x)
    correct = 0
    total = 0
    for i in range(k):
        members = np.nonzero(pred == i)[0]
        counts = np.array([(y[members] == j).sum() for j in range(k)])
        correct += counts.max()   # points carrying the cluster's majority label
        total += counts.sum()     # every point assigned to the cluster
    return float(correct) / total


# One purity score per feature dump, replacing the three verbatim copies
# of the same evaluation loop in the original script.
for name in ("cat", "rev", "mstn"):
    print(cluster_accuracy(np.load(dirr + "features_" + name + ".npz")))
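Editorial aside: a minimal sanity check for the purity metric above, using the cluster_accuracy helper as refactored; the toy arrays and the value 2 for k are invented for illustration. On cleanly separable points the majority-label purity should be exactly 1.0.

# Editorial sanity check (toy data is invented, not from the dataset).
toy = {
    'x1': np.array([[0.0], [0.1], [10.0]]),
    'x2': np.array([[10.1], [0.05], [9.9]]),
    'y1': np.array([0, 0, 1]),
    'y2': np.array([1, 0, 1]),
}
assert cluster_accuracy(toy, k=2) == 1.0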
| 30.19403
| 109
| 0.634207
| 319
| 2023
| 3.915361
| 0.159875
| 0.080064
| 0.072058
| 0.081665
| 0.843074
| 0.843074
| 0.785428
| 0.785428
| 0.785428
| 0.785428
| 0
| 0.018913
| 0.163618
| 2023
| 66
| 110
| 30.651515
| 0.719267
| 0.077113
| 0
| 0.75
| 0
| 0
| 0.040366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0.057692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d40aa4b7c4c1322f1ff3a827cd66e9ea94ffb317
| 3101
|
py
|
Python
|
library/tests/test_compensation.py
|
Sossa24/bmp280-python
|
e718bfb8a3580fda8a735e4d645f0a8fc2eba63d
|
[
"MIT"
] | 39
|
2018-12-04T00:05:52.000Z
|
2022-01-23T18:44:16.000Z
|
library/tests/test_compensation.py
|
Sossa24/bmp280-python
|
e718bfb8a3580fda8a735e4d645f0a8fc2eba63d
|
[
"MIT"
] | 10
|
2018-11-06T12:21:57.000Z
|
2022-02-01T07:10:35.000Z
|
library/tests/test_compensation.py
|
Sossa24/bmp280-python
|
e718bfb8a3580fda8a735e4d645f0a8fc2eba63d
|
[
"MIT"
] | 16
|
2018-11-04T19:57:35.000Z
|
2022-02-04T03:29:12.000Z
|
TEST_TEMP_RAW = 529191
TEST_TEMP_CMP = 24.7894877676
TEST_PRES_RAW = 326816
TEST_PRES_CMP = 1006.61517564
TEST_ALT_CMP = 57.3174


def test_temperature():
    from tools import SMBusFakeDevice
    from bmp280 import BMP280
    from calibration import BMP280Calibration
    dev = SMBusFakeDevice(1)
    # Load the fake temperature into the virtual registers
    dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
    dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
    dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
    bmp280 = BMP280(i2c_dev=dev)
    bmp280.setup()
    # Replace the loaded calibration with our known values
    bmp280.calibration = BMP280Calibration()
    assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)


def test_temperature_forced():
    from tools import SMBusFakeDevice
    from bmp280 import BMP280
    from calibration import BMP280Calibration
    dev = SMBusFakeDevice(1)
    # Load the fake temperature into the virtual registers
    dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
    dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
    dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
    bmp280 = BMP280(i2c_dev=dev)
    bmp280.setup(mode="forced")
    # Replace the loaded calibration with our known values
    bmp280.calibration = BMP280Calibration()
    assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)


def test_pressure():
    from tools import SMBusFakeDevice
    from bmp280 import BMP280
    from calibration import BMP280Calibration
    dev = SMBusFakeDevice(1)
    # Load the fake temperature values into the virtual registers
    # Pressure is temperature compensated!!!
    dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
    dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
    dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
    # Load the fake pressure values
    dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
    dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
    dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
    bmp280 = BMP280(i2c_dev=dev)
    bmp280.setup()
    # Replace the loaded calibration with our known values
    bmp280.calibration = BMP280Calibration()
    assert round(bmp280.get_pressure(), 4) == round(TEST_PRES_CMP, 4)


def test_altitude():
    from tools import SMBusFakeDevice
    from bmp280 import BMP280
    from calibration import BMP280Calibration
    dev = SMBusFakeDevice(1)
    # Load the fake temperature values into the virtual registers
    # Pressure is temperature compensated!!!
    dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
    dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
    dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
    # Load the fake pressure values
    dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
    dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
    dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
    bmp280 = BMP280(i2c_dev=dev)
    bmp280.setup()
    # Replace the loaded calibration with our known values
    bmp280.calibration = BMP280Calibration()
    assert round(bmp280.get_altitude(), 4) == round(TEST_ALT_CMP, 4)
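Editorial aside: the masks in these tests split a 20-bit raw sample across three registers, with the low 4 bits parked in the high nibble of the XLSB register; below is a minimal round-trip check, using only values already present in the tests, that the split is lossless.

# Editorial round-trip check of the 20-bit register split used above.
raw = TEST_TEMP_RAW
xlsb = (raw & 0x0000F) << 4   # low 4 bits, stored in the high nibble
lsb = (raw & 0x00FF0) >> 4    # middle 8 bits
msb = (raw & 0xFF000) >> 12   # top 8 bits
assert (msb << 12) | (lsb << 4) | (xlsb >> 4) == raw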
| 31.323232
| 72
| 0.698484
| 410
| 3101
| 5.119512
| 0.146341
| 0.060029
| 0.068128
| 0.040019
| 0.89757
| 0.89757
| 0.89757
| 0.89757
| 0.89757
| 0.89757
| 0
| 0.118267
| 0.203805
| 3101
| 98
| 73
| 31.642857
| 0.731875
| 0.185424
| 0
| 0.79661
| 0
| 0
| 0.002388
| 0
| 0
| 0
| 0.07879
| 0
| 0.067797
| 1
| 0.067797
| false
| 0
| 0.20339
| 0
| 0.271186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d47ad8b1b117342b8b3d4aee9d0870626eb07f11
| 44
|
py
|
Python
|
dags/func.py
|
akaimo/docker_airflow
|
149ca319fd489cdeb1c2ffd499bdb74aae1396be
|
[
"MIT"
] | null | null | null |
dags/func.py
|
akaimo/docker_airflow
|
149ca319fd489cdeb1c2ffd499bdb74aae1396be
|
[
"MIT"
] | null | null | null |
dags/func.py
|
akaimo/docker_airflow
|
149ca319fd489cdeb1c2ffd499bdb74aae1396be
|
[
"MIT"
] | null | null | null |
def sample_func():
    print("sample func")
| 14.666667
| 24
| 0.659091
| 6
| 44
| 4.666667
| 0.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 25
| 22
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|